-rw-r--r--  .coveragerc | 6
-rw-r--r--  .github/workflows/crmsh-cd.yml | 57
-rw-r--r--  .github/workflows/crmsh-ci.yml | 368
-rw-r--r--  .gitignore | 43
-rw-r--r--  .hgignore | 7
-rw-r--r--  .vscode/settings.json | 6
-rw-r--r--  AUTHORS | 47
-rw-r--r--  COPYING | 339
-rw-r--r--  ChangeLog | 1775
-rw-r--r--  Makefile.am | 81
-rw-r--r--  NEWS | 0
-rw-r--r--  README.md | 115
-rw-r--r--  TODO | 34
-rwxr-xr-x  autogen.sh | 144
-rwxr-xr-x  bin/crm | 56
-rw-r--r--  codecov.yml | 17
-rw-r--r--  configure.ac | 62
-rw-r--r--  contrib/README.vimsyntax | 35
-rw-r--r--  contrib/bash_completion.sh | 256
-rwxr-xr-x  contrib/git-hook-pre-commit | 14
-rw-r--r--  contrib/pcmk-ftdetect.vim | 2
-rw-r--r--  contrib/pcmk.vim | 114
-rw-r--r--  contrib/pygments_crmsh_lexers/__init__.py | 3
-rw-r--r--  contrib/pygments_crmsh_lexers/ansiclr.py | 50
-rw-r--r--  contrib/pygments_crmsh_lexers/crmsh.py | 88
-rw-r--r--  contrib/setup.py | 33
-rw-r--r--  crmsh.spec.in | 289
-rw-r--r--  crmsh.tmpfiles.d.conf | 1
-rw-r--r--  crmsh/__init__.py | 2
-rw-r--r--  crmsh/bootstrap.py | 3079
-rw-r--r--  crmsh/cache.py | 47
-rw-r--r--  crmsh/cibconfig.py | 4045
-rw-r--r--  crmsh/cibstatus.py | 391
-rw-r--r--  crmsh/cibverify.py | 32
-rw-r--r--  crmsh/clidisplay.py | 129
-rw-r--r--  crmsh/cliformat.py | 371
-rw-r--r--  crmsh/cmd_status.py | 145
-rw-r--r--  crmsh/command.py | 593
-rw-r--r--  crmsh/completers.py | 86
-rw-r--r--  crmsh/config.py | 513
-rw-r--r--  crmsh/constants.py | 538
-rw-r--r--  crmsh/corosync.py | 784
-rw-r--r--  crmsh/crash_test/__init__.py | 0
-rw-r--r--  crmsh/crash_test/check.py | 320
-rw-r--r--  crmsh/crash_test/config.py | 9
-rw-r--r--  crmsh/crash_test/explain.py | 31
-rw-r--r--  crmsh/crash_test/main.py | 209
-rw-r--r--  crmsh/crash_test/task.py | 637
-rw-r--r--  crmsh/crash_test/utils.py | 304
-rw-r--r--  crmsh/crm_gv.py | 238
-rw-r--r--  crmsh/crm_pssh.py | 165
-rw-r--r--  crmsh/handles.py | 127
-rw-r--r--  crmsh/healthcheck.py | 234
-rw-r--r--  crmsh/help.py | 449
-rw-r--r--  crmsh/history.py | 1056
-rw-r--r--  crmsh/idmgmt.py | 193
-rw-r--r--  crmsh/lock.py | 197
-rw-r--r--  crmsh/log.py | 574
-rw-r--r--  crmsh/log_patterns.py | 287
-rw-r--r--  crmsh/logparser.py | 641
-rw-r--r--  crmsh/logtime.py | 232
-rw-r--r--  crmsh/main.py | 385
-rw-r--r--  crmsh/minieval.py | 370
-rw-r--r--  crmsh/ocfs2.py | 346
-rw-r--r--  crmsh/options.py | 18
-rw-r--r--  crmsh/ordereddict.py | 133
-rw-r--r--  crmsh/orderedset.py | 102
-rw-r--r--  crmsh/pacemaker.py | 368
-rw-r--r--  crmsh/parallax.py | 76
-rw-r--r--  crmsh/parse.py | 1841
-rw-r--r--  crmsh/prun/__init__.py | 0
-rw-r--r--  crmsh/prun/prun.py | 283
-rw-r--r--  crmsh/prun/runner.py | 161
-rw-r--r--  crmsh/pyshim.py | 21
-rw-r--r--  crmsh/qdevice.py | 721
-rw-r--r--  crmsh/ra.py | 977
-rw-r--r--  crmsh/report/__init__.py | 0
-rw-r--r--  crmsh/report/collect.py | 499
-rw-r--r--  crmsh/report/constants.py | 102
-rw-r--r--  crmsh/report/core.py | 510
-rw-r--r--  crmsh/report/utils.py | 757
-rw-r--r--  crmsh/rsctest.py | 478
-rw-r--r--  crmsh/sbd.py | 633
-rw-r--r--  crmsh/schema.py | 154
-rw-r--r--  crmsh/scripts.py | 2169
-rw-r--r--  crmsh/service_manager.py | 97
-rw-r--r--  crmsh/sh.py | 479
-rw-r--r--  crmsh/ssh_key.py | 267
-rw-r--r--  crmsh/template.py | 183
-rw-r--r--  crmsh/term.py | 180
-rw-r--r--  crmsh/tmpfiles.py | 70
-rw-r--r--  crmsh/ui_assist.py | 132
-rw-r--r--  crmsh/ui_cib.py | 223
-rw-r--r--  crmsh/ui_cibstatus.py | 100
-rw-r--r--  crmsh/ui_cluster.py | 895
-rw-r--r--  crmsh/ui_configure.py | 1241
-rw-r--r--  crmsh/ui_context.py | 413
-rw-r--r--  crmsh/ui_corosync.py | 174
-rw-r--r--  crmsh/ui_history.py | 642
-rw-r--r--  crmsh/ui_maintenance.py | 96
-rw-r--r--  crmsh/ui_node.py | 620
-rw-r--r--  crmsh/ui_options.py | 181
-rw-r--r--  crmsh/ui_ra.py | 134
-rw-r--r--  crmsh/ui_resource.py | 796
-rw-r--r--  crmsh/ui_root.py | 204
-rw-r--r--  crmsh/ui_script.py | 523
-rw-r--r--  crmsh/ui_site.py | 81
-rw-r--r--  crmsh/ui_template.py | 360
-rw-r--r--  crmsh/ui_utils.py | 164
-rw-r--r--  crmsh/upgradeutil.py | 194
-rw-r--r--  crmsh/user_of_host.py | 122
-rw-r--r--  crmsh/userdir.py | 74
-rw-r--r--  crmsh/utils.py | 3150
-rw-r--r--  crmsh/watchdog.py | 179
-rw-r--r--  crmsh/xmlutil.py | 1575
-rw-r--r--  data-manifest | 225
-rw-r--r--  doc/bootstrap-howto.md | 206
-rw-r--r--  doc/bootstrap-todo.md | 56
-rw-r--r--  doc/crm.8.adoc | 5102
-rw-r--r--  doc/crmsh_crm_report.8.adoc | 15
-rw-r--r--  doc/development.md | 314
-rw-r--r--  doc/profiles.adoc | 47
-rw-r--r--  doc/releasing-a-new-version.md | 199
-rw-r--r--  doc/sort-doc.py | 82
-rw-r--r--  doc/website-v1/404.adoc | 9
-rw-r--r--  doc/website-v1/Makefile | 145
-rw-r--r--  doc/website-v1/about.adoc | 19
-rw-r--r--  doc/website-v1/configuration.adoc | 132
-rw-r--r--  doc/website-v1/crm.conf | 601
-rw-r--r--  doc/website-v1/crmold.conf | 602
-rw-r--r--  doc/website-v1/css/crm.css | 570
-rw-r--r--  doc/website-v1/css/font-awesome.css | 1338
-rw-r--r--  doc/website-v1/css/font-awesome.min.css | 4
-rw-r--r--  doc/website-v1/development.adoc | 74
-rw-r--r--  doc/website-v1/documentation.adoc | 42
-rw-r--r--  doc/website-v1/download.adoc | 40
-rw-r--r--  doc/website-v1/faq.adoc | 60
-rw-r--r--  doc/website-v1/fonts/FontAwesome.otf | bin 0 -> 62856 bytes
-rwxr-xr-x  doc/website-v1/fonts/fontawesome-webfont.eot | bin 0 -> 38205 bytes
-rwxr-xr-x  doc/website-v1/fonts/fontawesome-webfont.svg | 414
-rwxr-xr-x  doc/website-v1/fonts/fontawesome-webfont.ttf | bin 0 -> 80652 bytes
-rwxr-xr-x  doc/website-v1/fonts/fontawesome-webfont.woff | bin 0 -> 44432 bytes
-rw-r--r--  doc/website-v1/history-guide.adoc | 275
-rw-r--r--  doc/website-v1/img/history-guide/sample-cluster.conf.png | bin 0 -> 10009 bytes
-rw-r--r--  doc/website-v1/img/history-guide/smallapache-start.png | bin 0 -> 1146 bytes
-rw-r--r--  doc/website-v1/img/icons/README | 5
-rw-r--r--  doc/website-v1/img/icons/callouts/1.png | bin 0 -> 329 bytes
-rw-r--r--  doc/website-v1/img/icons/callouts/10.png | bin 0 -> 361 bytes
-rw-r--r--  doc/website-v1/img/icons/callouts/11.png | bin 0 -> 565 bytes
-rw-r--r--  doc/website-v1/img/icons/callouts/12.png | bin 0 -> 617 bytes
-rw-r--r--  doc/website-v1/img/icons/callouts/13.png | bin 0 -> 623 bytes
-rw-r--r--  doc/website-v1/img/icons/callouts/14.png | bin 0 -> 411 bytes
-rw-r--r--  doc/website-v1/img/icons/callouts/15.png | bin 0 -> 640 bytes
-rw-r--r--  doc/website-v1/img/icons/callouts/2.png | bin 0 -> 353 bytes
-rw-r--r--  doc/website-v1/img/icons/callouts/3.png | bin 0 -> 350 bytes
-rw-r--r--  doc/website-v1/img/icons/callouts/4.png | bin 0 -> 345 bytes
-rw-r--r--  doc/website-v1/img/icons/callouts/5.png | bin 0 -> 348 bytes
-rw-r--r--  doc/website-v1/img/icons/callouts/6.png | bin 0 -> 355 bytes
-rw-r--r--  doc/website-v1/img/icons/callouts/7.png | bin 0 -> 344 bytes
-rw-r--r--  doc/website-v1/img/icons/callouts/8.png | bin 0 -> 357 bytes
-rw-r--r--  doc/website-v1/img/icons/callouts/9.png | bin 0 -> 357 bytes
-rw-r--r--  doc/website-v1/img/icons/caution.png | bin 0 -> 2734 bytes
-rw-r--r--  doc/website-v1/img/icons/example.png | bin 0 -> 2599 bytes
-rw-r--r--  doc/website-v1/img/icons/home.png | bin 0 -> 1340 bytes
-rw-r--r--  doc/website-v1/img/icons/important.png | bin 0 -> 2980 bytes
-rw-r--r--  doc/website-v1/img/icons/next.png | bin 0 -> 1302 bytes
-rw-r--r--  doc/website-v1/img/icons/note.png | bin 0 -> 2494 bytes
-rw-r--r--  doc/website-v1/img/icons/prev.png | bin 0 -> 1348 bytes
-rw-r--r--  doc/website-v1/img/icons/tip.png | bin 0 -> 2718 bytes
-rw-r--r--  doc/website-v1/img/icons/up.png | bin 0 -> 1320 bytes
-rw-r--r--  doc/website-v1/img/icons/warning.png | bin 0 -> 3214 bytes
-rw-r--r--  doc/website-v1/img/laptop.png | bin 0 -> 2569 bytes
-rw-r--r--  doc/website-v1/img/loader.gif | bin 0 -> 2545 bytes
-rw-r--r--  doc/website-v1/img/servers.gif | bin 0 -> 4513 bytes
-rw-r--r--  doc/website-v1/include/history-guide/basic-transition.typescript | 22
-rw-r--r--  doc/website-v1/include/history-guide/diff.typescript | 11
-rw-r--r--  doc/website-v1/include/history-guide/info.typescript | 16
-rw-r--r--  doc/website-v1/include/history-guide/nfs-probe-err.typescript | 20
-rw-r--r--  doc/website-v1/include/history-guide/resource-trace.typescript | 7
-rw-r--r--  doc/website-v1/include/history-guide/resource.typescript | 6
-rw-r--r--  doc/website-v1/include/history-guide/sample-cluster.conf.crm | 54
-rw-r--r--  doc/website-v1/include/history-guide/status-probe-fail.typescript | 15
-rw-r--r--  doc/website-v1/include/history-guide/stonith-corosync-stopped.typescript | 8
-rw-r--r--  doc/website-v1/include/history-guide/transition-log.typescript | 13
-rw-r--r--  doc/website-v1/index.adoc | 25
-rw-r--r--  doc/website-v1/installation.adoc | 4
-rw-r--r--  doc/website-v1/make-news.py | 136
-rw-r--r--  doc/website-v1/man-1.2.adoc | 3437
-rw-r--r--  doc/website-v1/man-2.0.adoc | 5048
-rw-r--r--  doc/website-v1/man-3.adoc | 5309
-rw-r--r--  doc/website-v1/man-4.3.adoc | 5160
-rw-r--r--  doc/website-v1/news.adoc | 26
-rw-r--r--  doc/website-v1/news/2014-06-30-release-2_1.adoc | 93
-rw-r--r--  doc/website-v1/news/2014-10-28-release-2_1_1.adoc | 58
-rw-r--r--  doc/website-v1/news/2015-01-26-release-2_1_2.adoc | 69
-rw-r--r--  doc/website-v1/news/2015-04-10-release-2_1_3.adoc | 68
-rw-r--r--  doc/website-v1/news/2015-05-13-release-2_1_4.adoc | 126
-rw-r--r--  doc/website-v1/news/2015-05-25-getting-started-jp.adoc | 17
-rw-r--r--  doc/website-v1/news/2016-01-12-release-2_1_5.adoc | 56
-rw-r--r--  doc/website-v1/news/2016-01-15-release-2_2_0.adoc | 210
-rw-r--r--  doc/website-v1/news/2016-04-28-release-2_2_1.adoc | 73
-rw-r--r--  doc/website-v1/news/2016-08-12-release-2_3_0.adoc | 76
-rw-r--r--  doc/website-v1/news/2016-09-01-release-2_1_7.adoc | 46
-rw-r--r--  doc/website-v1/news/2016-09-02-release-2_3_1.adoc | 33
-rw-r--r--  doc/website-v1/news/2016-09-05-release-2_2_2.adoc | 36
-rw-r--r--  doc/website-v1/news/2017-01-31-release-3_0_0.adoc | 48
-rw-r--r--  doc/website-v1/news/2021-06-17-release-4_3_1.adoc | 55
-rw-r--r--  doc/website-v1/postprocess.py | 141
-rw-r--r--  doc/website-v1/rsctest-guide.adoc | 238
-rw-r--r--  doc/website-v1/scripts.adoc | 660
-rw-r--r--  doc/website-v1/start-guide.adoc | 208
-rw-r--r--  etc/crm.conf.in | 120
-rw-r--r--  etc/profiles.yml | 29
-rw-r--r--  pytest.ini | 4
-rw-r--r--  requirements.txt | 3
-rw-r--r--  scripts/apache/main.yml | 69
-rwxr-xr-x  scripts/check-uptime/fetch.py | 7
-rw-r--r--  scripts/check-uptime/main.yml | 19
-rwxr-xr-x  scripts/check-uptime/report.py | 11
-rw-r--r--  scripts/clvm-vg/main.yml | 74
-rw-r--r--  scripts/clvm/main.yml | 39
-rw-r--r--  scripts/cryptctl/README.md | 56
-rw-r--r--  scripts/cryptctl/main.yml | 70
-rw-r--r--  scripts/database/main.yml | 34
-rw-r--r--  scripts/db2-hadr/main.yml | 43
-rw-r--r--  scripts/db2/main.yml | 45
-rw-r--r--  scripts/drbd/main.yml | 41
-rw-r--r--  scripts/exportfs/main.yml | 37
-rw-r--r--  scripts/filesystem/main.yml | 30
-rw-r--r--  scripts/gfs2-base/main.yml | 27
-rw-r--r--  scripts/gfs2/main.yml | 62
-rw-r--r--  scripts/haproxy/haproxy.cfg | 13
-rw-r--r--  scripts/haproxy/main.yml | 37
-rwxr-xr-x  scripts/health/collect.py | 111
-rwxr-xr-x  scripts/health/hahealth.py | 40
-rw-r--r--  scripts/health/main.yml | 16
-rwxr-xr-x  scripts/health/report.py | 134
-rw-r--r--  scripts/libvirt/main.yml | 66
-rw-r--r--  scripts/lvm-drbd/main.yml | 62
-rw-r--r--  scripts/lvm/main.yml | 21
-rw-r--r--  scripts/mailto/main.yml | 29
-rw-r--r--  scripts/nfsserver-lvm-drbd/main.yml | 137
-rw-r--r--  scripts/nfsserver/main.yml | 74
-rw-r--r--  scripts/nginx/main.yml | 63
-rw-r--r--  scripts/ocfs2/main.yml | 76
-rw-r--r--  scripts/oracle/main.yml | 51
-rw-r--r--  scripts/raid-lvm/main.yml | 25
-rw-r--r--  scripts/raid1/main.yml | 17
-rw-r--r--  scripts/sap-as/main.yml | 70
-rw-r--r--  scripts/sap-ci/main.yml | 70
-rw-r--r--  scripts/sap-db/main.yml | 63
-rw-r--r--  scripts/sap-simple-stack-plus/main.yml | 220
-rw-r--r--  scripts/sap-simple-stack/main.yml | 183
-rw-r--r--  scripts/sapdb/main.yml | 32
-rw-r--r--  scripts/sapinstance/main.yml | 48
-rw-r--r--  scripts/sbd-device/main.yml | 63
-rw-r--r--  scripts/sbd/main.yml | 37
-rw-r--r--  scripts/virtual-ip/main.yml | 24
-rw-r--r--  scripts/vmware/main.yml | 60
-rw-r--r--  setup.py | 26
-rw-r--r--  templates/apache | 61
-rw-r--r--  templates/clvm | 59
-rw-r--r--  templates/filesystem | 44
-rw-r--r--  templates/gfs2 | 74
-rw-r--r--  templates/gfs2-base | 46
-rw-r--r--  templates/ocfs2 | 61
-rw-r--r--  templates/sbd | 34
-rw-r--r--  templates/virtual-ip | 39
-rw-r--r--  test/README.regression | 154
-rw-r--r--  test/bugs-test.txt | 11
-rwxr-xr-x  test/cib-tests.sh | 90
-rw-r--r--  test/cibtests/001.exp.xml | 20
-rw-r--r--  test/cibtests/001.input | 6
-rw-r--r--  test/cibtests/002.exp.xml | 26
-rw-r--r--  test/cibtests/002.input | 8
-rw-r--r--  test/cibtests/003.exp.xml | 27
-rw-r--r--  test/cibtests/003.input | 11
-rw-r--r--  test/cibtests/004.exp.xml | 27
-rw-r--r--  test/cibtests/004.input | 11
-rw-r--r--  test/cibtests/shadow.base | 10
-rw-r--r--  test/crm-interface | 89
-rw-r--r--  test/defaults | 2
-rw-r--r--  test/descriptions | 19
-rwxr-xr-x  test/evaltest.sh | 113
-rw-r--r--  test/features/bootstrap_bugs.feature | 251
-rw-r--r--  test/features/bootstrap_init_join_remove.feature | 205
-rw-r--r--  test/features/bootstrap_options.feature | 165
-rw-r--r--  test/features/bootstrap_sbd_delay.feature | 286
-rw-r--r--  test/features/bootstrap_sbd_normal.feature | 272
-rw-r--r--  test/features/cluster_api.feature | 143
-rw-r--r--  test/features/configure_bugs.feature | 38
-rw-r--r--  test/features/constraints_bugs.feature | 24
-rw-r--r--  test/features/coveragerc | 4
-rw-r--r--  test/features/crm_report_bugs.feature | 164
-rw-r--r--  test/features/crm_report_normal.feature | 109
-rw-r--r--  test/features/environment.py | 53
-rw-r--r--  test/features/geo_setup.feature | 29
-rw-r--r--  test/features/healthcheck.feature | 37
-rw-r--r--  test/features/ocfs2.feature | 61
-rw-r--r--  test/features/qdevice_options.feature | 50
-rw-r--r--  test/features/qdevice_setup_remove.feature | 173
-rw-r--r--  test/features/qdevice_usercase.feature | 87
-rw-r--r--  test/features/qdevice_validate.feature | 161
-rw-r--r--  test/features/resource_failcount.feature | 61
-rw-r--r--  test/features/resource_set.feature | 154
-rw-r--r--  test/features/ssh_agent.feature | 86
-rw-r--r--  test/features/steps/__init__.py | 0
-rwxr-xr-x  test/features/steps/behave_agent.py | 134
-rw-r--r--  test/features/steps/const.py | 353
-rw-r--r--  test/features/steps/step_implementation.py | 575
-rw-r--r--  test/features/steps/utils.py | 177
-rw-r--r--  test/features/user_access.feature | 114
-rw-r--r--  test/history-test.tar.bz2 | bin 0 -> 706600 bytes
-rwxr-xr-x  test/list-undocumented-commands.py | 29
-rwxr-xr-x  test/profile-history.sh | 22
-rwxr-xr-x  test/regression.sh | 199
-rwxr-xr-x  test/run-functional-tests | 551
-rw-r--r--  test/testcases/acl | 60
-rw-r--r--  test/testcases/acl.excl | 1
-rw-r--r--  test/testcases/acl.exp | 94
-rw-r--r--  test/testcases/basicset | 18
-rw-r--r--  test/testcases/bugs | 79
-rw-r--r--  test/testcases/bugs.exp | 215
-rw-r--r--  test/testcases/bundle | 20
-rw-r--r--  test/testcases/bundle.exp | 57
-rw-r--r--  test/testcases/commit | 39
-rw-r--r--  test/testcases/commit.exp | 90
-rw-r--r--  test/testcases/common.excl | 26
-rwxr-xr-x  test/testcases/common.filter | 9
-rw-r--r--  test/testcases/confbasic | 91
-rw-r--r--  test/testcases/confbasic-xml | 72
-rw-r--r--  test/testcases/confbasic-xml.exp | 206
-rwxr-xr-x  test/testcases/confbasic-xml.filter | 2
-rw-r--r--  test/testcases/confbasic.exp | 199
-rw-r--r--  test/testcases/delete | 64
-rw-r--r--  test/testcases/delete.exp | 194
-rw-r--r--  test/testcases/edit | 95
-rw-r--r--  test/testcases/edit.excl | 1
-rw-r--r--  test/testcases/edit.exp | 437
-rw-r--r--  test/testcases/file | 14
-rw-r--r--  test/testcases/file.exp | 77
-rw-r--r--  test/testcases/history | 42
-rw-r--r--  test/testcases/history.excl | 3
-rw-r--r--  test/testcases/history.exp | 600
-rwxr-xr-x  test/testcases/history.post | 3
-rwxr-xr-x  test/testcases/history.pre | 3
-rw-r--r--  test/testcases/newfeatures | 44
-rw-r--r--  test/testcases/newfeatures.exp | 81
-rw-r--r--  test/testcases/node | 14
-rw-r--r--  test/testcases/node.exp | 204
-rw-r--r--  test/testcases/options | 23
-rw-r--r--  test/testcases/options.exp | 64
-rw-r--r--  test/testcases/ra | 7
-rw-r--r--  test/testcases/ra.exp | 150
-rwxr-xr-x  test/testcases/ra.filter | 17
-rw-r--r--  test/testcases/resource | 84
-rw-r--r--  test/testcases/resource.exp | 1450
-rw-r--r--  test/testcases/rset | 21
-rw-r--r--  test/testcases/rset-xml | 19
-rw-r--r--  test/testcases/rset-xml.exp | 53
-rw-r--r--  test/testcases/rset.exp | 66
-rw-r--r--  test/testcases/scripts | 14
-rw-r--r--  test/testcases/scripts.exp | 305
-rwxr-xr-x  test/testcases/scripts.filter | 4
-rw-r--r--  test/testcases/shadow | 10
-rw-r--r--  test/testcases/shadow.exp | 24
-rwxr-xr-x  test/testcases/xmlonly.sh | 5
-rw-r--r--  test/unittests/__init__.py | 64
-rw-r--r--  test/unittests/bug-862577_corosync.conf | 51
-rw-r--r--  test/unittests/corosync.conf.1 | 81
-rw-r--r--  test/unittests/corosync.conf.2 | 58
-rw-r--r--  test/unittests/corosync.conf.3 | 68
-rw-r--r--  test/unittests/pacemaker.log | 923
-rw-r--r--  test/unittests/pacemaker.log.2 | 3
-rw-r--r--  test/unittests/pacemaker_unicode.log | 30
-rw-r--r--  test/unittests/schemas/acls-1.1.rng | 66
-rw-r--r--  test/unittests/schemas/acls-1.2.rng | 66
-rw-r--r--  test/unittests/schemas/constraints-1.0.rng | 180
-rw-r--r--  test/unittests/schemas/constraints-1.1.rng | 246
-rw-r--r--  test/unittests/schemas/constraints-1.2.rng | 219
-rw-r--r--  test/unittests/schemas/fencing.rng | 29
-rw-r--r--  test/unittests/schemas/nvset.rng | 35
-rw-r--r--  test/unittests/schemas/pacemaker-1.0.rng | 121
-rw-r--r--  test/unittests/schemas/pacemaker-1.1.rng | 161
-rw-r--r--  test/unittests/schemas/pacemaker-1.2.rng | 146
-rw-r--r--  test/unittests/schemas/resources-1.0.rng | 177
-rw-r--r--  test/unittests/schemas/resources-1.1.rng | 225
-rw-r--r--  test/unittests/schemas/resources-1.2.rng | 225
-rw-r--r--  test/unittests/schemas/rule.rng | 137
-rw-r--r--  test/unittests/schemas/score.rng | 18
-rw-r--r--  test/unittests/schemas/versions.rng | 24
-rw-r--r--  test/unittests/scripts/inc1/main.yml | 22
-rw-r--r--  test/unittests/scripts/inc2/main.yml | 26
-rw-r--r--  test/unittests/scripts/legacy/main.yml | 52
-rw-r--r--  test/unittests/scripts/templates/apache.xml | 36
-rw-r--r--  test/unittests/scripts/templates/virtual-ip.xml | 62
-rw-r--r--  test/unittests/scripts/unified/main.yml | 26
-rw-r--r--  test/unittests/scripts/v2/main.yml | 46
-rw-r--r--  test/unittests/scripts/vip/main.yml | 28
-rw-r--r--  test/unittests/scripts/vipinc/main.yml | 14
-rw-r--r--  test/unittests/scripts/workflows/10-webserver.xml | 50
-rw-r--r--  test/unittests/test.conf | 12
-rw-r--r--  test/unittests/test_bootstrap.py | 1905
-rw-r--r--  test/unittests/test_bugs.py | 893
-rw-r--r--  test/unittests/test_cib.py | 32
-rw-r--r--  test/unittests/test_cliformat.py | 324
-rw-r--r--  test/unittests/test_corosync.py | 488
-rw-r--r--  test/unittests/test_crashtest_check.py | 790
-rw-r--r--  test/unittests/test_crashtest_main.py | 215
-rw-r--r--  test/unittests/test_crashtest_task.py | 777
-rw-r--r--  test/unittests/test_crashtest_utils.py | 540
-rw-r--r--  test/unittests/test_gv.py | 36
-rw-r--r--  test/unittests/test_handles.py | 166
-rw-r--r--  test/unittests/test_lock.py | 271
-rw-r--r--  test/unittests/test_objset.py | 40
-rw-r--r--  test/unittests/test_ocfs2.py | 465
-rw-r--r--  test/unittests/test_parallax.py | 104
-rw-r--r--  test/unittests/test_parse.py | 749
-rw-r--r--  test/unittests/test_prun.py | 157
-rw-r--r--  test/unittests/test_qdevice.py | 1031
-rw-r--r--  test/unittests/test_ratrace.py | 131
-rw-r--r--  test/unittests/test_report_collect.py | 588
-rw-r--r--  test/unittests/test_report_core.py | 551
-rw-r--r--  test/unittests/test_report_utils.py | 862
-rw-r--r--  test/unittests/test_sbd.py | 894
-rw-r--r--  test/unittests/test_scripts.py | 914
-rw-r--r--  test/unittests/test_service_manager.py | 84
-rw-r--r--  test/unittests/test_sh.py | 189
-rw-r--r--  test/unittests/test_time.py | 24
-rw-r--r--  test/unittests/test_ui_cluster.py | 173
-rw-r--r--  test/unittests/test_upgradeuitl.py | 54
-rw-r--r--  test/unittests/test_utils.py | 1514
-rw-r--r--  test/unittests/test_watchdog.py | 311
-rw-r--r--  test/unittests/test_xmlutil.py | 61
-rwxr-xr-x  test/update-expected-output.sh | 9
-rw-r--r--  test_container/Dockerfile | 28
-rw-r--r--  test_container/behave-agent.socket | 9
-rw-r--r--  test_container/behave-agent@.service | 9
-rwxr-xr-x  test_container/behave_agent.py | 131
-rw-r--r--  tox.ini | 31
-rwxr-xr-x  update-data-manifest.sh | 28
-rwxr-xr-x  utils/crm_clean.py | 46
-rw-r--r--  utils/crm_init.py | 251
-rwxr-xr-x  utils/crm_pkg.py | 342
-rwxr-xr-x  utils/crm_rpmcheck.py | 72
-rw-r--r--  utils/crm_script.py | 190
-rw-r--r--  version.in | 1
447 files changed, 111731 insertions, 0 deletions
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..c4c841d
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,6 @@
+[run]
+omit =
+    */tests/*
+    */test/*
+    *setup.py*
+    tests/*
diff --git a/.github/workflows/crmsh-cd.yml b/.github/workflows/crmsh-cd.yml
new file mode 100644
index 0000000..48b03a0
--- /dev/null
+++ b/.github/workflows/crmsh-cd.yml
@@ -0,0 +1,57 @@
+# This workflow will install Python dependencies, run tests and lint with a single version of Python
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+# For more information about secrets see: https://docs.github.com/en/free-pro-team@latest/actions/reference/encrypted-secrets
+
+name: crmsh CD
+
+on: push
+
+env:
+  PACKAGE_NAME: crmsh
+  CONTAINER_IMAGE: nyang23/obs-continuous-delivery:latest
+  OBS_USER: ${{ secrets.OBS_USER }}
+  OBS_PASS: ${{ secrets.OBS_PASS }}
+  OBS_PROJECT: ${{ secrets.OBS_PROJECT_CRMSH45 }}
+  TARGET_PROJECT: ${{ secrets.TARGET_PROJECT }}
+
+jobs:
+  integration:
+    if: github.repository == 'ClusterLabs/crmsh' && github.ref_name == 'crmsh-4.6'
+    uses: ./.github/workflows/crmsh-ci.yml
+
+  delivery:
+    if: github.repository == 'ClusterLabs/crmsh' && github.ref_name == 'crmsh-4.6'
+    needs: integration
+    runs-on: ubuntu-20.04
+    timeout-minutes: 10
+    steps:
+      - uses: actions/checkout@v3
+      - name: delivery process
+        run: |
+          docker pull "${CONTAINER_IMAGE}"
+          docker run -t -v "$(pwd)":/package:ro \
+            -e OBS_USER=$OBS_USER \
+            -e OBS_PASS=$OBS_PASS \
+            -e OBS_PROJECT=$OBS_PROJECT \
+            -e PACKAGE_NAME=$PACKAGE_NAME \
+            "${CONTAINER_IMAGE}" \
+            /bin/bash -c "cp -r /package ~/package && cd ~/package && /scripts/upload.sh"
+
+  submit:
+    if: github.repository == 'ClusterLabs/crmsh' && github.ref_name == 'crmsh-4.6'
+    needs: delivery
+    runs-on: ubuntu-20.04
+    timeout-minutes: 10
+    steps:
+      - uses: actions/checkout@v3
+      - name: submit process
+        run: |
+          docker pull "${CONTAINER_IMAGE}"
+          docker run -t \
+            -e OBS_USER=$OBS_USER \
+            -e OBS_PASS=$OBS_PASS \
+            -e OBS_PROJECT=$OBS_PROJECT \
+            -e PACKAGE_NAME=$PACKAGE_NAME \
+            -e TARGET_PROJECT=$TARGET_PROJECT \
+            "${CONTAINER_IMAGE}" \
+            /bin/bash -c "cd ~ && /scripts/submit.sh"
diff --git a/.github/workflows/crmsh-ci.yml b/.github/workflows/crmsh-ci.yml
new file mode 100644
index 0000000..f58ebd9
--- /dev/null
+++ b/.github/workflows/crmsh-ci.yml
@@ -0,0 +1,368 @@
+# This workflow will install Python dependencies, run tests and lint with a single version of Python
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+# For more information about secrets see: https://docs.github.com/en/free-pro-team@latest/actions/reference/encrypted-secrets
+
+name: crmsh CI
+
+on:
+  - pull_request
+  - workflow_call
+
+env:
+  DOCKER_SCRIPT: ./test/run-functional-tests
+  GET_INDEX_OF: ./test/run-functional-tests _get_index_of
+
+jobs:
+  general_check:
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v3
+      - name: check data-manifest
+        run: |
+          ./update-data-manifest.sh
+          output=`git --no-pager diff data-manifest`
+          [[ -z $output ]] || {
+            echo "$output"
+            echo "A new version of data-manifest is needed."
+            echo "Please run ./update-data-manifest.sh && git add ./data-manifest in your local environment and push the code again."
+            exit 1
+          }
+
+  unit_test:
+    runs-on: ubuntu-20.04
+    strategy:
+      matrix:
+        python-version: ['3.6', '3.8', '3.10']
+      fail-fast: false
+    timeout-minutes: 5
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install tox
+      - name: Test with pytest in tox
+        run: |
+          tox -v -e${{ matrix.python-version }}
+
+  functional_test_crm_report_bugs:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for crm_report bugs
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF crm_report_bugs`
+          $DOCKER_SCRIPT $index && $DOCKER_SCRIPT -d && $DOCKER_SCRIPT $index -u
+      - uses: codecov/codecov-action@v3
+
+  functional_test_crm_report_normal:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for crm_report normal
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF crm_report_normal`
+          $DOCKER_SCRIPT $index && $DOCKER_SCRIPT -d && $DOCKER_SCRIPT $index -u
+      - uses: codecov/codecov-action@v3
+
+  functional_test_bootstrap_bugs:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for bootstrap bugs
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF bootstrap_bugs`
+          $DOCKER_SCRIPT $index
+      - uses: codecov/codecov-action@v3
+
+  functional_test_bootstrap_bugs_non_root:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for bootstrap bugs, under non root user
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF bootstrap_bugs`
+          $DOCKER_SCRIPT $index -u
+      - uses: codecov/codecov-action@v3
+
+  functional_test_bootstrap_common:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for bootstrap common
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF bootstrap_init_join_remove`
+          $DOCKER_SCRIPT $index
+      - uses: codecov/codecov-action@v3
+
+  functional_test_bootstrap_common_non_root:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for bootstrap common, under non root user
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF bootstrap_init_join_remove`
+          $DOCKER_SCRIPT $index -u
+      - uses: codecov/codecov-action@v3
+
+  functional_test_bootstrap_options:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for bootstrap options
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF bootstrap_options`
+          $DOCKER_SCRIPT $index
+      - uses: codecov/codecov-action@v3
+
+  functional_test_bootstrap_options_non_root:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for bootstrap options, under non root user
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF bootstrap_options`
+          $DOCKER_SCRIPT $index -u
+      - uses: codecov/codecov-action@v3
+
+  functional_test_qdevice_setup_remove:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for qdevice setup and remove
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF qdevice_setup_remove`
+          $DOCKER_SCRIPT $index
+      - uses: codecov/codecov-action@v3
+
+  functional_test_qdevice_setup_remove_non_root:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for qdevice setup and remove, under non root user
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF qdevice_setup_remove`
+          $DOCKER_SCRIPT $index -u
+      - uses: codecov/codecov-action@v3
+
+  functional_test_qdevice_options:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for qdevice options
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF qdevice_options`
+          $DOCKER_SCRIPT $index && $DOCKER_SCRIPT -d && $DOCKER_SCRIPT $index -u
+      - uses: codecov/codecov-action@v3
+
+  functional_test_qdevice_validate:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for qdevice validate
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF qdevice_validate`
+          $DOCKER_SCRIPT $index
+      - uses: codecov/codecov-action@v3
+
+  functional_test_qdevice_validate_non_root:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for qdevice validate, under non root user
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF qdevice_validate`
+          $DOCKER_SCRIPT $index -u
+      - uses: codecov/codecov-action@v3
+
+  functional_test_qdevice_user_case:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for qdevice user case
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF qdevice_usercase`
+          $DOCKER_SCRIPT $index && $DOCKER_SCRIPT -d && $DOCKER_SCRIPT $index -u
+      - uses: codecov/codecov-action@v3
+
+  functional_test_resource_failcount:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for resource failcount
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF resource_failcount`
+          $DOCKER_SCRIPT $index && $DOCKER_SCRIPT -d && $DOCKER_SCRIPT $index -u
+      - uses: codecov/codecov-action@v3
+
+  functional_test_resource_set:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for resource set
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF resource_set`
+          $DOCKER_SCRIPT $index
+      - uses: codecov/codecov-action@v3
+
+  functional_test_resource_set_non_root:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for resource set, under non root user
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF resource_set`
+          $DOCKER_SCRIPT $index -u
+      - uses: codecov/codecov-action@v3
+
+  functional_test_configure_sublevel:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for configure sublevel bugs
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF configure_bugs`
+          $DOCKER_SCRIPT $index && $DOCKER_SCRIPT -d && $DOCKER_SCRIPT $index -u
+      - uses: codecov/codecov-action@v3
+
+  functional_test_constraints_bugs:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for constraints bugs
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF constraints_bugs`
+          $DOCKER_SCRIPT $index && $DOCKER_SCRIPT -d && $DOCKER_SCRIPT $index -u
+      - uses: codecov/codecov-action@v3
+
+  functional_test_geo_cluster:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for geo cluster
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF geo_setup`
+          $DOCKER_SCRIPT $index && $DOCKER_SCRIPT -d && $DOCKER_SCRIPT $index -u
+      - uses: codecov/codecov-action@v3
+
+  functional_test_healthcheck:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for healthcheck
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          index=`$GET_INDEX_OF healthcheck`
+          $DOCKER_SCRIPT $index && $DOCKER_SCRIPT -d && $DOCKER_SCRIPT $index -u
+      - uses: codecov/codecov-action@v3
+
+  functional_test_cluster_api:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for cluster api
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          $DOCKER_SCRIPT `$GET_INDEX_OF cluster_api`
+      - uses: codecov/codecov-action@v3
+
+  functional_test_user_access:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for user access
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          $DOCKER_SCRIPT `$GET_INDEX_OF user_access`
+      - uses: codecov/codecov-action@v3
+
+  functional_test_ssh_agent:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: functional test for ssh agent
+        run: |
+          echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+          sudo systemctl restart docker.service
+          $DOCKER_SCRIPT `$GET_INDEX_OF ssh_agent` && $DOCKER_SCRIPT -d && $DOCKER_SCRIPT -u `$GET_INDEX_OF ssh_agent`
+      - uses: codecov/codecov-action@v3
+
+  original_regression_test:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+      - uses: actions/checkout@v3
+      - name: original regression test
+        run: |
+          $DOCKER_SCRIPT `$GET_INDEX_OF "regression test"`
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..87ff9be
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,43 @@
+*.pyc
+*~
+#*.*#
+.#*
+doc/website-v1/gen
+Makefile.in
+autom4te.cache
+Makefile
+aclocal.m4
+autoconf
+autoheader
+automake
+config.log
+config.status
+configure
+crm.conf
+crmsh.spec
+install-sh
+missing
+version
+crmsh.egg-info/*
+crmtestout/*
+doc/crm.8
+doc/crm.8.html
+doc/crmsh_hb_report.8
+doc/crmsh_hb_report.8.html
+hb_report/hb_report
+patches/*
+build/*
+
+# Tool specific files
+.README.md.html
+.*.*~
+.project
+.settings
+.pydevproject
+.coverage
+
+contrib/build/
+contrib/dist/
+contrib/pygments_crmsh_lexers.egg-info/
+
+.tox/
diff --git a/.hgignore b/.hgignore
new file mode 100644
index 0000000..1afba4a
--- /dev/null
+++ b/.hgignore
@@ -0,0 +1,7 @@
+syntax: glob
+
+*.pyc
+*~
+#*.*#
+doc/gen
+
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..c5cb0ba
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,6 @@
+{
+ "python.linting.pylintEnabled": true,
+ "python.linting.flake8Enabled": false,
+ "python.linting.enabled": true,
+ "python.pythonPath": "/usr/bin/python3"
+} \ No newline at end of file
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..d0d5f0d
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,47 @@
+NOTE: The work of everyone on this project is dearly appreciated. If you
+ are not listed here but should be, please notify us!
+
+ afederic <afederic[at]gmail[dot]com>
+ Adam Spiers <aspiers[at]suse[dot]com>
+ Andrei Maruha <Andrei_Maruha[at]epam[dot]com>
+ Andrew Beekhof <andrew[at]beekhof[dot]net>
+ Bin Liu <bliu[at]suse[dot]com>
+ Borislav Borisov <borislav[dot]v[dot]borisov[at]gmail[dot]com>
+ Christian Seiler <christian[at]iwakd[dot]de>
+ Daniel Hoffend <dh[at]dotlan[dot]net>
+ Dejan Muhamedagic <dejan[at]suse[dot]de>
+ dougcahill <doug[dot]cahill[at]actifio[dot]com>
+ Eric Ren <zren[at]suse[dot]com>
+ Federica Teodori <federica[dot]teodori[at]googlemail[dot]com>
+ Florian Haas <florian[dot]haas[at]linbit[dot]com>
+ Goldwyn Rodrigues <rgoldwyn[at]novell[dot]com>
+ Hideo Yamauchi <renayama19661014[at]ybb[dot]ne[dot]jp>
+ Holger Teutsch <holger[dot]teutsch[at]web[dot]de>
+ Igor Tsiglyar <igor_tsiglyar[at]outlook[dot]com>
+ Kai Kang <kai[dot]kang[at]windriver[dot]com>
+ Kazunori INOUE <kazunori[dot]inoue3[at]gmail[dot]com>
+ Keisuke MORI <keisuke[dot]mori+ha[at]gmail[dot]com>
+ Kristoffer Gronlund <kgronlund[at]suse[dot]com>
+ Larry Chen <lchen[at]suse[dot]com>
+ Lars Ellenberg <lars[dot]ellenberg[at]linbit[dot]com>
+ Lars Marowsky-Brée <lmb[at]suse[dot]de>
+ Marc A. Smith <marc[at]astersmith[dot]com>
+ Michael Prokop <devnull[at]localhost>
+ Motaharu Kobu <mkubo[at]3ware[dot]co[dot]jp>
+ NAKAHIRA Kazutomo <nakahira[dot]kazutomo[at]oss[dot]ntt[dot]co[dot]jp>
+ Nate Clark <nate[at]neworld[dot]us>
+ nozawat <nozawat[at]gmail[dot]com>
+ Pedro Salgado <steenzout[at]saucelabs[dot]com>
+ Peter Schwindt <peter[at]schwindt-net[dot]de>
+ Richard B Winters <rik[at]mmogp[dot]com>
+ seabres <rainer[dot]brestan[at]gmx[dot]net>
+ Tim Serong <tserong[at]suse[dot]com>
+ Thomas Rohlajz <rohlik[at]3server[dot]cz>
+ Valentin Vidic <Valentin[dot]Vidic[at]CARNet[dot]hr>
+ Vincenzo Pii <piiv[at]zhaw[dot]ch>
+ Vladislav Bogdanov <bubble[at]hoster-ok[dot]com>
+ Xia Li <XLi[at]suse[dot]com>
+ Xin Liang <XLiang[at]suse[dot]com>
+ Xinwei Hu <xwhu[at]novell[dot]com>
+ Yan Gao <ygao[at]suse[dot]com>
+ Yuusuke IIDA <iidayuus[at]intellilink[dot]co[dot]jp>
diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..d511905
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,339 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/ChangeLog b/ChangeLog
new file mode 100644
index 0000000..dfc5912
--- /dev/null
+++ b/ChangeLog
@@ -0,0 +1,1775 @@
+* Tue Jan 9 2024 Xin Liang <XLiang@suse.com>
+- Release 4.6.0
+- Fix: report: Unable to gather log files that are in the syslog format (bsc#1218491)
+- Dev: ui_corosync: Add a completer for corosync.set to enumerate all current paths
+- Dev: bootstrap: Assign hosts with _context.node_list_in_cluster in join_ssh_merge (bsc#1218331)
+
+* Fri Dec 22 2023 Xin Liang <XLiang@suse.com>
+- Release 4.6.0 rc2
+- Dev: ui_cluster: Move --use-ssh-agent to optional arguments
+- Fix: autoconf: --with-version does not override the variable used in `version.in`
+- Dev: unify version string used in setup.py and autotools
+- Fix: ui_cluster: Improve the process of 'crm cluster stop' (bsc#1213889)
+- Fix: scripts.health: call `setup_logging()` before importing crmsh.report.utils
+- Dev: log: save backtrace of ValueError in logfile and suppress it in console
+
+* Thu Dec 7 2023 Xin Liang <XLiang@suse.com>
+- Release 4.6.0 rc1
+- Dev: doc: Unify contents of manpage and help info
+- Dev: report: Rewrite crm report module
+- Dev: utils: To prevent shell injection, manipulate the argument array instead of the command line string
+- Fix: bootstrap: fix the owner and permission of file authorized_keys (bsc#1217279)
+- Fix: prun: should not call user_pair_for_ssh() when target host is localhost (bsc#1217094)
+- Dev: report: Redirect warning and error from remote node into stderr
+- Fix: utils: Add 'sudo' only when there is a sudoer (bsc#1215549)
+- Dev: xmlutil: refactor class CrmMonXmlParser
+- Dev: completers: Add online_nodes and standby_nodes
+- Fix: bootstrap: add informative logging for generating new ssh keypairs
+- Fix: forward ssh-agent for `crm report __slave`
+- Fix: sh: raise AuthorizationError and generate diagnostic messages when ClusterShell fails with 255
+- Dev: bootstrap: allow to authenticate interactively in `crm cluster join --use-ssh-agent`
+- Dev: ssh-agent: add informative logging for adding keys to authorized_keys
+- Dev: ssh-agent: add diagnose messages
+- Dev: bootstrap: implement ssh-agent support for geo cluster (jsc#PED-5774)
+- Dev: bootstrap: refine key swap for user `hacluster`
+- Dev: bootstrap: implement ssh-agent support for qdevice (jsc#PED-5774)
+- Dev: bootstrap: implement ssh-agent support (jsc#PED-5774)
+- Dev: ui_utils: Supports '=' when setting node/resource attributes
+- Fix: report: Pick up tarball suffix dynamically (bsc#1215438)
+- Fix: report: Pick 'gzip' as the first compress prog for cross-platform compatibility(bsc#1215438)
+- Fix: constants: Add several resource meta attributes (bsc#1215319)
+- refactor: move get_stdout and get_stdout_stderr to crmsh.sh.ShellUtils
+- Dev: refactor shell calling routines
+- Dev: utils: strip new line when get_stdout_or_raise_error returns
+- Fix: prun: setsid to prevent ssh from asking a password from terminal
+- Fix: upgradeutil: reduce the timeout for getting sequence from remote node (bsc#1213797)
+- Fix: userdir: Get the effective user name instead of using getpass.getuser (bsc#1213821)
+- Dev: requirements: remove parallax
+- Fix: upgradeutil: support the change of path of upgrade_seq in crmsh-4.5 (bsc#1213050)
+- Fix: ui_context: wait4dc should assume a subcommand completes successfully if no exceptions are raised (bsc#1212992)
+- Fix: upgradeutil: do not try to upgrade when the cluster is partially set up
+- Fix: bootstrap: fix the validation of option -N and -c (bsc#1212436)
+- Fix: geo_cluster: the behavior of choosing a default user in geo_join/geo_init_arbitrator differs from `cluster join` (bsc#1211817)
+- Fix: utils: do not use sudoer user to create ssh session unless it is specified explicitly (bsc#1211817)
+- medium: ui_node: fix cib rollback race on node standby
+- Dev: ui_cluster: Use 'CustomAppendAction' instead of 'append' argparse action
+- Dev: bootstrap: Configure ssh key when fetch geo config
+- Dev: cmd_status: Use --output-as option instead of deprecated --as-html and --as-xml options
+- Dev: cmd_status: Append 'with quorum' and 'WITHOUT quorum' to display keyword list
+- Dev: spec: Remove python3-parallax from spec file
+- Fix: bootstrap: failed to save username for localhost when initializing a cluster with a qnet server
+- Fix: utils: fix `cluster diff --checksum`
+- Dev: ui_cluster: refine messages for `cluster run` and `cluster copy`
+- Dev: geo: implement non-root support for geo_fetch_config()
+- Fix: bootstrap: failed to join when the cluster is initialized with skip_csync2
+- Dev: prun: minor refactor and add comments
+- Dev: prun: implement timeout
+- Dev: prun: add a concurrency limiter
+- Dev: remove python dependency parallax
+- Dev: scripts: implement non-root support with crmsh.prun
+- Dev: implement non-root support for crm_pssh with crmsh.prun
+- Dev: remove some direct calls to parallax module
+- Dev: prun: add special handling for localhost
+- Dev: refine non-root sudoer support for crmsh.parallax.parallax_slurp
+- Dev: refine non-root sudoer support for crmsh.parallax.parallax_copy
+- Dev: upgradeutil: adapt to new parallax interface
+- Dev: refine non-root sudoer support for crmsh.parallax.parallax_call (bsc#1210709)
+- Fix: bootstrap: `init --qnetd-hostname` fails when username is not specified (bsc#1211200)
+- Fix: bootstrap: crm cluster join default behavior change in ssh key handling (bsc#1210693)
+- Fix: help: Long time to load and parse crm.8.adoc (bsc#1210198)
+- Fix: cibconfig: use any existing rsc_defaults set rather than create another one (bsc#1210614)
+- Fix: lock: Join node failed to wait init node finished (bsc#1210332)
+- Dev: log_patterns: update patterns for pacemaker version 2.0+
+- Dev: bootstrap: Support replacing sbd device via sbd stage
+- Dev: utils: add auto_convert_role flag for handle_role_for_ocf_1_1 function
+
+* Thu Mar 30 2023 Xin Liang <XLiang@suse.com>
+- Release 4.5.0
+- Dev: bootstrap: Remove /var/lib/crm and ~/.config/crm/crm.conf when removing node
+- Dev: bootstrap: Generate the public key on the remote if it does not exist
+- Fix: utils: qdevice initialization should use user_pair_for_ssh() to get the appropriate users (crmsh#1157)
+- Fix: crm report: continue even if there are offline nodes (bsc#1209480)
+- Dev: upgradeutil: Change 'upgrade' terminology to 'configuration fix'
+- Dev: utils: Check passwordless between cluster nodes
+- Dev: Dockerfile: Update pacemaker and libqb version
+- Dev: remove 'sudo' prefix internally
+- Fix: validate ssh session when the user is determined by guessing (bsc#1209193)
+- Dev: bootstrap: Change user shell for hacluster on remote node, in init_ssh_impl function
+- Fix: parallax: Use 'sudo bash -c' when executing commands via sudoer (bsc#1209192)
+- Dev: qdevice: Add more debug messages for running commands
+- Dev: log: For the log_only_to_file method, show debug log in debug mode
+
+* Thu Mar 9 2023 Xin Liang <XLiang@suse.com>
+- Release 4.5.0 rc2
+- Dev: version: Bump crmsh version to 4.5.0
+- Fix: bootstrap: Swap hacluster ssh key with other nodes
+- Fix: report: Fix crm report issue under non-root user
+- Fix: bootstrap: Don't save core.debug when saving core.hosts (bsc#1208991)
+- Dev: log: Redirect debug messages into stderr
+
+* Fri Mar 3 2023 Xin Liang <XLiang@suse.com>
+- Release 4.5.0 rc1
+- Fix: qdevice: Unable to setup qdevice under non-root user (bsc#1208770)
+- Dev: upgradeutil: do upgrade silently (bsc#1208327)
+- Fix: bootstrap: `crm cluster join ssh` raises TypeError (bsc#1208327)
+- Dev: utils: Change the way to get pacemaker's version (bsc#1208216)
+- Dev: bootstrap: guess and ask whether to operate in non-root mode (jsc#PED-290)
+- Dev: bootstrap: allow the cluster to operate with ssh session under non-root sudoer (jsc#PED-290)
+- Fix: hawk fails to parse the slash (bsc#1206217)
+- Fix: extra logs while configuring passwordless (bsc#1207720)
+- Dev: utils: Check current user's privilege and give hints to user
+- Dev: ui_configure: Deprecate configure erase sub-command
+- Fix: report: Catch read exception (bsc#1206606)
+- Feature: replace root by a custom user with root privileges
+- Fix: bootstrap: Unset SBD_DELAY_START when running 'crm cluster start' (bsc#1202177)
+- Dev: ui_node: redirect `node delete` to `cluster remove`
+- Feature: bootstrap: Add option -x to skip csync2 initialization stage during the whole cluster bootstrap
+- Dev: parse: complete advised operation values for other actions beside monitor
+- Dev: ui_context: redirect `foo -h`/`foo --help` to `help foo` (bsc#1205735)
+- Fix: qdevice: Adjust SBD_WATCHDOG_TIMEOUT when configuring qdevice not using stage (bsc#1205727)
+- Fix: cibconfig: Complete promotable=true and interleave=true for Promoted/Unpromoted resource (bsc#1205522)
+- Fix: corosync: show corosync ring status if it has a fault (bsc#1205615)
+- Dev: bootstrap: fix passwordless ssh authentication for hacluster automatically when a new node is joining the cluster (bsc#1201785)
+- Dev: upgradeutil: automated init ssh passwordless auth for hacluster after upgrading (bsc#1201785)
+- Dev: parse: cli_to_xml: populate advised monitor/start/stop operations values
+- fix: log: fail to open log file even if user is in haclient group (bsc#1204670)
+- Fix: sbd: Ask whether to overwrite when an sbd device is given in interactive mode (bsc#1201428)
+- Dev: bootstrap: Adjust cluster properties including priority-fencing-delay
+- Fix: ui_cluster: 'crm cluster stop' failed to stop services (bsc#1203601)
+- Dev: bootstrap: Adjust pcmk_delay_max and stonith-timeout for all configured fence agents
+- Dev: cibconfig: "crm config show related:xxx" provides partial search among class, provider, type fields
+- Fix: crash_test: do not use firewalld to isolate a cluster node (bsc#1192467)
+- Dev: bootstrap: Add delay to start corosync when node list larger than 5
+- Dev: log: print begin and end marker in different lines in status_long
+- Dev: parallax: Add LogLevel=error ssh option to filter out warnings (bsc#1196726)
+- Revert "Fix: utils: Only raise exception when return code of systemctl command over ssh larger than 4 (bsc#1196726)" (bsc#1202655)
+- fix: configure: refresh cib before showing or modifying if no pending changes have been made (bsc#1202465)
+- Fix: bootstrap: Use crmsh.parallax instead of parallax module directly (bsc#1202006)
+
+* Wed Aug 10 2022 Xin Liang <XLiang@suse.com>
+- tag: 4.4.1 for bug fix
+- Fix: utils: use -o and -n to compare files instead of strings for crm_diff (bsc#1201312)
+- Dev: bootstrap: remove cluster add sub-command
+- Fix: bootstrap: -N option setup the current node and peers all together (bsc#1175863)
+- Dev: doc: add help info for related: prefix for 'configure show' command
+- Dev: cibconfig: enable "related:" prefix to show the objects by given ra type
+- Fix: crm report: use sudo when under non root and hacluster user (bsc#1199634)
+- Fix: utils: wait4dc: Adapt since the output of 'crmadmin -S' changed (bsc#1199412)
+- Fix: bootstrap: stop and disable csync2.socket on removed node (bsc#1199325)
+- Fix: crm report: Read data in a safe way, to avoid UnicodeDecodeError (bsc#1198180)
+- Fix: qdevice: Add lock to protect init_db_on_qnetd function (bsc#1197323)
+- Fix: utils: Only raise exception when return code of systemctl command over ssh larger than 4 (bsc#1196726)
+
+* Thu Feb 17 2022 Xin Liang <XLiang@suse.com>
+- Release 4.4.0
+- Dev: README: update with unit tests steps
+- Dev: crmsh-ci.yml: Add python3.6 and 3.10 into unit test list
+- Dev: tox: Adjust tox.ini, add py36 and py310 in envlist
+
+* Thu Feb 8 2022 Xin Liang <XLiang@suse.com>
+- Release 4.4.0 rc2
+- Fix: sbd: do not overwrite SYSCONFIG_SBD and sbd-disk-metadata if the input is 'n' (bsc#1194870)
+- Dev: bootstrap: the joining node retries an active cluster
+- Dev: behave: Change docker cgroup driver to systemd
+- Dev: ui_node: Use diff and patch instead of replacing the cib
+- Dev: crm report: Add dpkg support
+
+* Thu Jan 14 2022 Xin Liang <XLiang@suse.com>
+- Release 4.4.0 rc1
+- Fix: bootstrap: Don't change pacemaker.service bootup preference (bsc#1194616)
+- Fix: log: Change the log file owner as hacluster:haclient (bsc#1194619)
+- Dev: crm.conf: Add OCF_1_1_SUPPORT flag to control ocf 1.1 feature
+- Dev: doc: Introduce promotable clone and role Promoted/Unpromoted
+- Fix: crash_test: Adjust help output of 'crm cluster crash_test -h'(bsc#1194615)
+- Dev: utils: Convert Master/Slave to Promoted/Unpromoted if the schema supports OCF 1.1
+- Dev: xmlutil: Replace Promoted/Unpromoted with Master/Slave when the OCF 1.0 schema is detected
+- Dev: doc: Replace pingd with ocf:pacemaker:ping
+- Dev: ui_resource: set target-role to Promoted/Unpromoted when doing promote or demote
+- Dev: ra: Support Promoted/Unpromoted
+- Dev: ocfs2: Fix running ocfs2 stage on cluster with diskless-sbd
+- Fix: bootstrap: Change default transport type to udpu (unicast) (bsc#1132375)
+- Dev: bootstrap: Avoid duplicated setting for rsc_defaults
+- Fix: ui_configure: Give a deprecated warning when using "ms" subcommand (bsc#1194125)
+- Fix: xmlutil: Parse promotable clone correctly and also consider compatibility (bsc#1194125)
+- Dev: doc: Rename hb_report to crm report
+- Dev: crm report: Get distribution info correctly and reuse it
+- Dev: crm_report: Integrate report log into crmsh logging
+- Dev: log: Print a new line when input uses the default value in interactive mode
+- Fix: bootstrap: Change log info when need to change user login shell (bsc#1194026)
+- Dev: crm_report: Move hb_report directory to crmsh/report
+- Dev: doc: Mention /etc/crm/profiles.yml in man crm
+- Dev: ui_node: Delete node directly using cibadmin if crm_node -R failed
+- Dev: xmlutil: Add class CrmMonXmlParser to parse xml output of crm_mon
+- Dev: ui_cluster: Exit stop process when there is no DC
+- Dev: ui_cluster: check that the dlm controld ra is running when stopping the cluster
+- Dev: log: In status_long function, add a blank line when an exception occurs
+- Revert "Dev: ui_cluster: Make sure node is online when stop service"
+- Dev: sbd: Adjust timeout related values
+- Dev: ui_cluster: check whether the qdevice service is started when starting the cluster, if qdevice is configured
+- Dev: idmgmt: Avoid IDs with a leading digit
+- Dev: ui_cluster: Check that the service is available before enabling/disabling qdevice
+- Dev: ui_node: Improve node standby/online methods
+- Dev: ui_cluster: Remove node from node list if node is unreachable
+- Dev: Give warning when no-quorum-policy is not set to freeze while using DLM
+- Fix: crm: To avoid the potential "permission denied" error under other users (boo#1192754)
+- Fix: ui_resource: Parse node and lifetime correctly (bsc#1192618)
+- Dev: doc: Consolidate help info for those using argparse
+- Dev: ui_cluster: Make sure node is online when stop service
+- Dev: ui_cluster: Gracefully shut down dlm
+- Dev: ui_cluster: Support multi sub-commands with --all option or specific node
+- orderedset.py: fix deprecation on collections.MutableSet
+- Dev: crm report: Consolidate collect functions in collect.py and running them in parallel
+- Dev: crm report: Collect report using multiprocessing correctly
+- Dev: CI: change docker image to leap 15.2, and enlarge the timeout value for each CI case
+- Fix: ui_resource: Parse lifetime option correctly (bsc#1191508)
+- Dev: log: Rotate crmsh.log at 1M with a backup count of 10
+- Fix: bootstrap: Add /etc/crm/crm.conf and /etc/crm/profiles.yml into /etc/csync2/csync2.cfg (bsc#1190466)
+- Dev: Using python logging in all crmsh modules
+- Dev: hb_report: Integrate hb_report logging
+- Dev: crash_test: Integrate crash test logging
+- Dev: crm: Load python logging config in /usr/sbin/crm
+- Dev: log: Using logging as log system in crmsh
+- Dev: msg: Remove msg.py
+- Dev: constants: Add color const for logging
+- Fix: utils: Improve detect_cloud function and support non-Hyper-V in Azure
+- Fix: hb_report: Using python way to collect ra trace files (bsc#1189641)
+- Fix: bootstrap: Adjust corosync and sbd parameters according to the profile environment detected (bsc#1175896)
+- Fix: sbd: adjust sbd systemd TimeoutStartSec together with SBD_DELAY_START
+- Dev: Makefile: add etc/profiles.yml and move crm.conf.in into etc
+- Fix: doc: Note that resource tracing is only supported by OCF RAs(bsc#1188966)
+- Dev: ui_resource: Enhance trace output
+- Fix: bootstrap: adjust host list for parallax to get and copy known_hosts file(bsc#1188971)
+- Medium: ra: performance/usability improvement (avoid systemd)
+- Dev: ui_context: Add info when spell-corrections happen
+- Dev: ocfs2: set no-quorum-policy to freeze when configuring OCFS2
+- Fix: parse: Should still be able to show the empty property if it already exists(bsc#1188290)
+- Dev: qdevice: Split class QDevice into qdevice.py from corosync.py
+- Fix: bootstrap: check for missing fields in 'crm_node -l' output (bsc#1182131)
+- Fix: resource: make untrace consistent with trace (bsc#1187396)
+- Dev: sbd: enable SBD_DELAY_START in virtualization environment
+- Fix: ocfs2: Skip verifying UUID for ocfs2 device on top of raid or lvm on the join node (bsc#1187553)
+- Dev: sbd: Split class SBDManager into sbd.py from bootstrap.py
+
+* Thu Jun 17 2021 Xin Liang <XLiang@suse.com>
+- Release 4.3.1
+- Fix: history: use Path.mkdir instead of mkdir command(bsc#1179999)
+- Dev: doc: replace preflight check doc as crash test doc
+- Dev: crash_test: Add big warnings to draw users' attention to potential failover
+- Dev: crash_test: rename preflight_check as crash_test
+- Fix: bootstrap: update sbd watchdog timeout when using diskless SBD with qdevice(bsc#1184465)
+- Dev: utils: allow configuring a link-local ipv6 address
+- Dev: bootstrap: return when no ocfs2 device is specified in interactive mode
+- Fix: parse: shouldn't allow property setting with an empty value(bsc#1185423)
+- Dev: crm.8.adoc: remove redundant help message
+- Fix: help: show help message from argparse(bsc#1175982)
+- Dev: ocfs2: add ocfs2.OCFS2Manager to manage ocfs2 configure process
+- Dev: watchdog: split class Watchdog into watchdog.py from bootstrap.py
+- Dev: bootstrap: raise exception and execute status_done on success
+- Fix: bootstrap: add sbd via bootstrap stage on an existing cluster (bsc#1181906)
+- Fix: bootstrap: make StrictHostKeyChecking=no a constant (bsc#1185437)
+- Dev: cibconfig: resolve TypeError for fencing-topology tag
+- Dev: bootstrap: change status_long with contextmanager
+- Dev: bootstrap: disable unnecessary warnings (bsc#1178118)
+- Fix: bootstrap: sync corosync.conf before finished joining(bsc#1183359)
+- Dev: add "crm corosync status qdevice" sub-command
+- Dev: ui_cluster: add qdevice help info
+- Dev: ui_cluster: enable/disable corosync-qdevice.service
+- Fix: bootstrap: parse space in sbd device correctly(bsc#1183883)
+- Dev: preflight_check: move preflight_check directory into crmsh
+- Fix: bootstrap: get the peer node name correctly (bsc#1183654)
+- Fix: update version and author (bsc#1183689)
+- Dev: bootstrap: enable configuring qdevice on interactive mode
+- Fix: ui_resource: change return code and error to warning for some harmless actions (bsc#1180332)
+- Dev: README: change the build status link in README
+- Dev: lock: change lock directory under /run
+- Fix: bootstrap: raise warning when configuring diskless SBD with a node count less than 3 (bsc#1181907)
+- Fix: bootstrap: Adjust qdevice configure/remove process to avoid race condition due to quorum lost(bsc#1181415)
+- Dev: utils: remove unused utils.cluster_stack and its related codes
+- Dev: cibconfig: remove related code about detecting crm_diff support for --no-version
+- Fix: ui_configure: raise error when params not exist(bsc#1180126)
+- Dev: doc: remove doc for crm node status
+- Dev: ui_node: remove status subcommand
+- Fix: hb_report: walk through hb_report process under hacluster(CVE-2020-35459, bsc#1179999; CVE-2021-3020, bsc#1180571)
+- Fix: bootstrap: setup authorized ssh access for hacluster(CVE-2020-35459, bsc#1179999; CVE-2021-3020, bsc#1180571)
+
+* Fri Feb 19 2021 Xin Liang <XLiang@suse.com>
+- Release 4.3.0
+- Dev: doc: add analyze and preflight_check help messages in doc
+- Dev: analyze: Add analyze sublevel and put preflight_check in it
+- Dev: utils: change default file mode to 644 for str2file function
+- Dev: hb_report: Detect if any ocfs2 partitions exist
+- Dev: lock: give more specific error message when raise ClaimLockError
+- Fix: Replace mktemp() with mkstemp() for security
+- Dev: unit test cases for preflight check ASR SBD feature utils.py
+- Fix: Remove the duplicate --cov-report html in tox.
+- Dev: unit test cases for preflight check ASR SBD feature check.py and task.py
+- Fix: fix some lint issues.
+- Fix: Replace utils.msg_info with task.info
+- Fix: Solve a circular import error of utils.py
+- Fix: hb_report: run lsof with specific ocfs2 device(bsc#1180688)
+- Dev: corosync: change the permission of corosync.conf to 644
+- Fix: preflight_check: task: raise error when report_path isn't a directory
+- Fix: bootstrap: Use class Watchdog to simplify watchdog config(bsc#1154927, bsc#1178869)
+- Dev: Polish the sbd feature.
+- Dev: Replace -f with -c and run check when no parameter is provided.
+- Fix: Fix the yes option not working
+- Fix: Remove useless import and show help when no input.
+- Dev: Correct SBD device id inconsistency during ASR
+- Fix: completers: return complete start/stop resource id list correctly(bsc#1180137)
+- Dev: Makefile.am: change makefile to integrate preflight_check
+- Medium: integrate preflight_check into crmsh
+- Fix: bootstrap: make sure sbd device UUID was the same between nodes(bsc#1178454)
+- Fix: utils: skip if no netmask in the result of ip -o addr show(bsc#1180421)
+- Fix: bootstrap: add /etc/modules-load.d/watchdog.conf into csync.cfg(bsc#1180424)
+- Low: bootstrap: make invoke return specific error(bsc#1177023)
+- Dev: test: add timeout-minutes to each test job
+- Fix: bootstrap: Refactor join_lock.py for more generic use
+- Dev: bootstrap: use ping to test whether the host is reachable before joining
+- Dev: unittest: adjust unit test code for setup_passwordless_with_other_nodes function
+- Low: bootstrap: check that the cluster is running on the init node
+- Fix: bootstrap: use class JoinLock to manage lock in parallel join(bsc#1175976)
+
+* Tue Dec 1 2020 Xin Liang <XLiang@suse.com>
+- Release 4.2.1
+- Fix: utils: improve disable_service and enable_service function(bsc#1178701)
+- Fix: bootstrap: disable corosync-qdevice if not configured(bsc#1178701)
+- Low: bootstrap: should include /etc/sysconfig/nfs into csync2.cfg(bsc#1178373)
+- Low: bootstrap: minor change for _get_sbd_device_interactive function(bsc#1178333)
+- Fix: hb_report: collect corosync.log if it is defined in the config file (bsc#1148874)
+- Fix: ui_cluster: check service status while start/stop(bsc#1177980)
+- Fix: bootstrap: Stop hawk service when removing node(bsc#1175708)
+- Fix: cibverify: give warning if crm_verify return warning(bsc#1122391)
+- Fix: parse: convert score to kind for rsc_order configure(bsc#1122391)
+- Fix: bootstrap: remove specific configured address while removing node(bsc#1165644)
+- Fix: hb_report: fix sanitize functionality(bsc#1163581)
+- Fix: replace start_delay with start-delay
+- Fix: on_fail should be on-fail
+- Low: config: Try to handle configparser.MissingSectionHeaderError while reading config file
+- Medium: ui_configure: Obscure sensitive data by default(bsc#1163581)
+- Fix: hb_report: collect archived logs(bsc#1148873)
+- Low: bootstrap: check whether the sbd package is installed
+- Low: bootstrap: Improve qdevice configure process:
+  * More reasonable naming for variables
+  * More function docstrings
+  * Move functions to more reasonable locations
+  * Create functions to integrate similar functions into one
+  * Split big functions into small ones, easier for unit testing
+  * Refactor functions
+  * Create utils.cluster_run_cmd function to avoid using crm cluster run directly in code
+- Low: bootstrap: swap keys with other nodes when join_ssh(bsc#1176178)
+- Fix: bootstrap: revert ssh_merge function for compatibility(bsc#1175057)
+- Fix: bootstrap: adjust sbd config process to fix bug on sbd stage(bsc#1175057)
+- Low: corosync: handle the return code of corosync-quorumtool correctly(bsc#1174588)
+- Low: ui_corosync: copy ssh key to qnetd when detecting that a password is needed
+- Low: hb_report: Fix collecting of binary data (bsc#1166962)
+- High: bootstrap: ssh key configuration improvement(bsc#1169581)
+- High: bootstrap: bootstrap network improvement
+- Revert "Fix: bootstrap: crmsh use its own specific ssh key(bsc#1169581)"
+- Low: cibconfig: Avoid adding the ID attribute to select_* nodes
+- High: bootstrap: using class SBDManager for sbd configuration and management(bsc#1170037, bsc#1170999)
+- Fix: bootstrap: crmsh use its own specific ssh key(bsc#1169581)
+- Low: bootstrap: change ha-cluster-bootstrap log path
+- Low: ui_corosync: print cluster nodes while getting quorum and qnetd status
+- Low: bootstrap: exit with proper error messages when ssh returns failure
+- Low: ui_cluster: use argparse choices to validate -i and -t option
+- Low: corosync: Use with statement to open file
+- Fix: ui_resource: refresh <Tab> should complete resource first(bsc#1167220)
+- Low: ui_context: give warning if using alias command
+- Low: bootstrap: Simplify bootstrap context
+- Low: corosync: Improve qdevice configure process
+- Fix: doc: Update man page about completion example of crm resource(bsc#1166644)
+- Fix: bootstrap: Change condition to add stonith-sbd resource(bsc#1166967)
+- Fix: bootstrap: use csync2 '-f' option correctly(bsc#1166684)
+- Low: setup.py: update crmsh's version
+- Fix: crmsh.spec.in: enable completion of crm command(bsc#1166329)
+- Low: crmsh.spec.in: sync contents from NHF's crmsh.spec file
+- Low: utils: update detect_cloud pattern for aws
+- Low: doc: update configure.set documentation
+- Feature: configure: make configure.set to update operation
+- Low: replace configparser.SafeConfigParser with configparser.ConfigParser
+- Fix: ui_cluster: Do not allow space-only values for options (bsc#1141976)
+- Fix: crmsh.spec: using mktemp to create tmp file(bsc#1154163)
+- Fix: bootstrap: set placement-strategy value as "default"(bsc#1129462)
+- Fix: hb_report: disable dumping all task stacks into dmesg (bsc#1158060)
+
+* Mon Dec 23 2019 Xin Liang <XLiang@suse.com> and many others
+- Release 4.2.0
+- Merge pull request #464 from liangxin1300/2019_crmsh_qdevice_qnetd
+- Low: ui_cluster: replace --qdevice with --qnetd-hostname
+- Low: corosync: add log and debug messages on each certificate steps
+- Low: ui_cluster: change qdevice related option's help message
+- Low: bootstrap: support qdevice heuristics
+- Low: bootstrap: start qdevice/qnetd service when not overwriting configuration
+- Low: ui_corosync: improve corosync status sub-command
+- Low: bootstrap: when removing qdevice, remove qdevice database
+- Low: bootstrap: qdevice certification process during cluster join
+- Low: ui_cluster: change option info for qdevice/qnetd
+- Low: bootstrap: qdevice certification process during cluster init
+- Low: bootstrap: interface for removing qdevice
+- Low: corosync: check tie-breaker is a valid nodeid
+- Low: bootstrap: improve init_qdevice function
+- Low: bootstrap: write qdevice config section when configuring qdevice in stage
+- Low: bootstrap: adjust corosync configuration for qdevice
+- Low: bootstrap: make the qdevice process a bootstrap stage
+- Low: bootstrap: manage qnetd node
+- Low: bootstrap: validate qdevice parameters
+- Merge pull request #483 from liangxin1300/20191105_python_behave
+- Merge pull request #484 from liangxin1300/20191112_nose_verbose
+- Merge pull request #482 from liangxin1300/20191101_parallax_functions
+- Low: parallax: create class Parallax to simplify using parallax
+- Merge pull request #480 from aleksei-burlakov/config-do_property
+- Doc: ui_configure: do_property: ask to remove maintenance from resources and nodes
+- Low: ui_configure: do_property: ask to remove maintenance from resources and nodes
+- Merge pull request #476 from liangxin1300/20191011_ssh_key
+- Merge pull request #478 from aleksei-burlakov/node-do_maintenance
+- Doc: ui_node: do_maintenance: ask to remove maintenance attr from primitives
+- Low: ui_node: do_maintenance: ask to remove maintenance attr from primitives
+- Merge pull request #422 from liangxin1300/20190227a
+- Merge pull request #479 from aleksei-burlakov/resource-do_maintenance
+- Low: ui_resource: ask about ALL primitives when overriding attributes
+- Fix: corosync: reject appending an ipaddress to the config file if it is already there (bsc#1127095, bsc#1127096)
+- Low: bootstrap: create authorized_keys file if not exists
+- Low: bootstrap: add "--no-overwrite-sshkey" option to avoid SSH key be overwritten
+- Low: bootstrap: don't overwrite ssh key if already exists
+- Merge pull request #465 from liangxin1300/20190814a
+- Merge pull request #461 from gao-yan/sanitize-orig-cib-as-patch-base
+- Merge pull request #472 from vvidic/yaml-load
+- Merge pull request #471 from aleksei-burlakov/crm-resource-maintaintenance
+- Doc: ui_resource: resolve maintenance vs is-managed conflict
+- Low: ui_resource: resolve maintenance vs is-managed conflict
+- Scripts: fix yaml loader warning
+- Merge pull request #468 from aleksei-burlakov/crm-resource-maintaintenance
+- Low: doc: update cluster run help documentation
+- Low: ui_cluster: run commands on multiple specific nodes
+- Fix: ui_cluster: refactor function list_cluster_nodes and handle the None situation(bsc#1145520)
+- Low: ui_resource: maintenance: stop using crm_resource
+- Merge pull request #467 from liangxin1300/revert_52a44fdce
+- Merge pull request #466 from liangxin1300/20190816_bsc1145823
+- Fix: utils: fix logic for processing non-comment lines (bsc#1145823)
+- High: cibconfig: Correctly sanitize the original CIB as patch base (bsc#1127716, bsc#1138405)
+- Revert "high: cibconfig: Use correct CIB as patch base (bsc#1127716)"
+- Partially revert "medium: cibconfig: Sanitize CIB for patching (bsc#1127716)"
+- Merge pull request #457 from vvidic/commmon
+- Doc: manpages: Fix spelling
+
+* Fri Jun 21 2019 Diego Akechi <dakechi@suse.com> and many others
+- Release 4.1.0
+- Fix: utils: issue in to_ascii (bsc#1138115)
+- Fix: bootstrap: bindnetaddr should accept both network and specific IP(bsc#1135585, bsc#1135586)
+- Fix: hb_report: analysis.txt should include warning, error, and critical messages (bsc#1135696)
+- Included Contributing section in README.
+- medium: ui_node: Check corosync state before clearstate (bsc#1129702)
+- fix: hb_report: handle UnicodeDecodeError (bsc#1130715)
+  * set errors='replace' to replace invalid utf-8 characters
+  * try to catch UnicodeDecodeError and print the traceback
+- medium: cibconfig: Sanitize CIB for patching (bsc#1127716)
+- high: cibconfig: Use correct CIB as patch base (bsc#1127716)
+- medium: parse: Detect and error on illegal ordering of op attributes (bsc#1129210)
+- medium: utils: Handle sysconfig values containing = (bsc#1129317)
+- low: hb_report: collect output of "sbd dump" and "sbd list"(bsc#1129383)
+- low: msg: add timestamp for DEBUG messages(bsc#1129380)
+- Fix: bsc#1129719: check command and related files exist
+- Low: doc: add related notice for new "promot*" tags
+- High: testcase: add testcases for newly added "promot*" tags
+- High: constants: add "promotable", "promoted-max" and "promoted-node-max" in clone meta attributes
+- Low: testcase: add testcase for #425
+- Fix: cibconfig: #425 The ID attribute is not required for select and select_attributes
+- doc: Add guide to releasing new versions
+- medium: scripts: Set kind for order constraints, not score (bsc#1123187)
+- low: utils: add support for dpkg
+- low: utils: add support for apt-get
+- low: utils: convert string constants to bytes
+- High: Testcases: update the testcases/bugs for bsc#1120554
+- Fix: bsc#1120857, bsc#1120856 bootstrap warning messages should start with "WARNING:" instead of "!"
+- Fix: bsc#1120554, bsc#1120555 crmsh crashed when using configure>template>apply
+- medium: cibverify: Increase log level for verification (bsc#1116559)
+- high: cibconfig: Normalize - to _ in param names (bsc#1111579)
+- medium: ra: Handle obsoletes attribute (bsc#1111579)
+- ui_cluster: restart cluster command is added
+- auto-commit enabling/disabling maintenance mode for a whole cluster
+- medium: bootstrap: Skip netmask check on GCP (bsc#1106946)
+- medium: utils: Detect local IP on GCP (bsc#1106946)
+- medium: bootstrap: Correctly check rrp_mode flag (bsc#1110463)
+- medium: bootstrap: Pick first match for multiple routes (bsc#1106946)
+- medium: utils: Use cloud metadata service to discover IP (bsc#1106946)
+- Fix: bootstrap: change the default ip address selection for both mcast and unicast (bsc#1109975, bsc#1109974)
+- Fix incorrect bindnetaddr in corosync.conf (bsc#1103833) (bsc#1103834)
+- fix: bootstrap: non-interactive unicast cluster init and join (bsc#1109172)
+- medium: bootstrap: Disable strict host key checking on all ssh invocations
+- support ocfs2 log collecting
+- hbreport: process name change for pacemaker 2.0(bsc#1106052)
+- Fix: bootstrap: "-i" option doesn't work(bsc#1103833, bsc#1103834)
+- Low: bootstrap: No warning message when using '-q'
+- high: ra: Support Pacemaker 2.0 daemon names
+- high: config: Locate pacemaker daemons more intelligently (#67) (bsc#1096783)
+- Low: Travis-CI: make sure exit value is not 0 while unittest failed
+- Fix: TypeError in logparser.py(bsc#1093433)
+- High: hbreport: fix UnicodeEncodeError while print(bsc#1093564)
+
+* Thu Mar 28 2019 Kristoffer Grönlund <kgronlund@suse.com> and many others
+- Release 3.0.4
+- Fix: bootstrap: "-i" option doesn't work(bsc#1103833, bsc#1103834)
+- high: bootstrap: Use default IP address for ring0 (bsc#1069142)
+- low: bootstrap: change multi-heartbeats as option-mode
+- low: bootstrap: Clarify messages
+- low: bootstrap: give a confirmation message when removing a node
+- low: bootstrap: Improve comments / error messages
+- low: bootstrap: Fall back to logging into $TMPDIR/ha-cluster-bootstrap.log
+- low: bootstrap: Check for firewalld before SuSEfirewall2
+- medium: bootstrap: Add support for chrony
+- Detect firewall by checking installed packages
+- low: ui_cluster: complete a node name only once, or the same node name will be completed many times with repeated "Tab"s
+- medium: enable adding a second heartbeat line for unicast. Changes include:
+  1. IPv4 & IPv6 support
+  2. the user should specify a local IP for ringX_addr on both init and join
+  3. user input must be one of the local IP addresses
+  4. peer ringX_addr and local ringX_addr must be in the same network
+- medium: bootstrap: check init options before running (other option checks can be added here later)
+- low: bootstrap: simplify the code for checking adminIP
+- low: bootstrap: reset the dev value when the dev input is not valid
+- low: bootstrap: catch OSError exceptions instead of crashing
+- low: bootstrap: give error hints when using sbd without a watchdog
+- medium: enable adding a second heartbeat line for mcast. Changes include:
+  1. skip second heartbeat configuration when the number of local networks < 2
+  2. bindnetaddrs must be different and the gap between ports must be larger than 1
+  3. give different default values for the second configuration
+  4. IPv4 & IPv6 support
+- medium: bootstrap: adminIP should not exist before configuration
+- medium: bootstrap: adminIP should not be the network address itself
+- fix: utils.list_cluster_nodes: use "crm_node -l" first
+- medium: utils: list_cluster_nodes: read nodes list from cib.xml
+- medium: utils: extend "IP/Network" codes for IPv4/IPv6 both
+- low: ui_cluster: strip "None" when cmd stdout is None
+- medium: bootstrap: validate adminIP. The administration virtual IP must:
+  1) have a valid IPv4/IPv6 format
+  2) belong to one of the local networks
+- medium: bootstrap: use a strict regex and validation function for bindnetaddr/mcastaddr
+- low: bootstrap: when a node is joining, it will take a while after init_cluster_local
+- low: bootstrap: join_csync2 may take a long while, so use status_long and status_done to give hints
+- medium: bootstrap: configure with IPv6 (unicast)
+- medium: bootstrap: configure with IPv6 (mcast). Changes include:
+  1) use the "-I" option to stand for IPv6 in ui_cluster
+  2) totem.ip_version should be set to "ipv6"
+  3) choose an interface which has an IPv6 address for the default values
+  4) each node must have a unique nodeid when using IPv6; the nodeid is derived from the IPv6 address on the default interface
+  5) the IPv6 network computation is learned from https://github.com/tehmaze/ipcalc
+- medium: bootstrap: disable completion and history when running bootstrap
+- low: ui_cluster: Add two new lines before add a new node
+- low: bootstrap: Don't rely on ha-cluster-* shortcuts
+- fix: bootstrap: remove cib.xml before corosync.add_node
+- low: bootstrap: color the input error message
+- medium: bootstrap: add callback function to check valid port range
+- high: cibconfig: Normalize - to _ in param names (bsc#1111579)
+- medium: ra: Handle obsoletes attribute (bsc#1111579)
+- Fix missing argument in RAInfo's constructor.
+
+* Thu Jun 28 2018 Kristoffer Grönlund <kgronlund@suse.com> and many others
+- Release 3.0.3
+- low: bootstrap: suppress the error message
+- high: bootstrap: expected votes wouldn't update in unicast mode
+- medium: bootstrap: run "csync2_update" for all files after new joining node call csync2_remote (bsc#1087248)
+- medium: utils: Avoid crash on missing process id (bsc#1084730)
+
+* Thu Jun 28 2018 Kristoffer Grönlund <kgronlund@suse.com> and many others
+- Release 3.0.2
+- high: ra: Support Pacemaker 2.0 daemon names
+- high: config: Locate pacemaker daemons more intelligently (#67) (bsc#1096783)
+- Fix: TypeError in logparser.py(bsc#1093433)
+- Parse /32 route entries
+- medium: ui_cluster: Stop corosync when stopping pacemaker (bsc#1066156)
+- low: ui_node: normal is deprecated in favor of member
+- fix: ui_resource: Using crm_failcount instead of crm_attribute(bsc#1074127)
+- Fix is_program(dmidecode) error (bsc#1070344)
+- low: ra: Don't require deprecated parameters (#321)
+- medium: bootstrap: Missing dmidecode on ppc64le (bsc#1069802)
+- low: bootstrap: Improve message when sbd is not installed (bsc#1050427)
+- Fix SBD configuration when using SBD device (Fixes #235)
+
+* Fri May 18 2018 Kristoffer Grönlund <kgronlund@suse.com> and many others
+- Release 4.0.0 - Python3 only compatible.
+- Parse /32 route entries
+- low: terminal will lose its cursor after typing ctrl+c (bsc#1090626)
+- high: bash_completion: Adjust for non-interactive mode(bsc#1090304)
+- low: ui_configure: Adjust prompt string after help messages(bsc#1090140)
+- doc: Fix unbalanced example marker
+- high: hbreport: adjustments for hbreport (bsc#1088784)
+  * use utf-8 encoding when opening a file
+  * pacemaker.log should not be excluded as HA_LOG
+- high: ui_resource: Undeprecate refresh and remove reprobe (bsc#1084736)
+- Update man-2.0.adoc
+- Update man-1.2.adoc
+- Update crm.8.adoc
+- Update man-3.adoc
+- low: bootstrap: Updated authkey generation (bsc#1077389)
+- low: bootstrap: Strip spaces before some status descriptions
+- fix: bootstrap: Create pacemaker_remote authkey (bsc#1077389)
+- low: bootstrap: Always ask whether to use sbd
+- fix: hb_report: using log_debug instead of log_warning
+- fix: hb_report: get the right file decompressor (bsc#1077553)
+- medium: hb_report: Avoid calling deprecated network utilities (bsc#1073638)
+- high: crm_script: Python2->3 string conversion crash in wizards (bsc#1074835)
+- fix: hb_report: Collect irregular log file
+- fix: hb_report: Don't create *.log.info file if not in timespan
+- medium: hb_report: implement dlm_dump info
+- fix: hb_report: sbd info patch for review
+- fix: hb_report: add some new package names in constants.PACKAGES
+- fix: hb_report: collect sbd info (bsc#1076389)
+- low: constants: Add bundle to more lists of things
+- low: xmlutil: Add bundle to sort (bsc#1076239)
+- medium: constants: Add bundle to constants (bsc#1076239)
+- implement logic for lms and ffsplit
+- low: ui_node: normal is deprecated in favor of member
+- medium: ui_cluster: Stop corosync when stopping pacemaker (bsc#1066156)
+- low: ui_configure: no complete for rename new_id
+- high: bootstrap: Add QDevice/QNetd support (bsc#1070961)
+- medium: bootstrap: Don't try to remove full nodes from remote nodes
+- low: bootstrap: Don't ssh to localhost in remove
+- clvm-vg wizard: update to use LVM-activate RA
+- clvm wizard: update to use lvmlockd instead of clvmd
+- fix: cibconfig: clean up code related to default-action-timeout
+- high: scripts: Enable complex expressions in when: (bsc#1074835)
+- medium: hb_report: Support new pacemaker.log location (fate#324508)
+- low: ui_configure: enable completer for rsc_template
+- low: ui_configure: Complete rsc template correctly
+- fix: ra: Convert bytes to str
+- fix: ui_resource: Using crm_failcount instead of crm_attribute(bsc#1074127)
+- low: ui_configure: improve do_group completer
+- high: scripts: Fix Python 3 migration issues in health, check-uptime (bsc#1071519)
+- high: parse: Support new alert syntax (#280) (bsc#1069129)
+- low: ui_configure: use filter_keys to replace any_startswith
+- fix: hb_report: return "" to avoid TypeError
+- high: bootstrap: Fix firewall reload command invocation (bsc#1071108)
+- high: bootstrap: Encode, not decode (bsc#1070344)
+- Fix writing non-ascii chars to log (bsc#1070344)
+- Fix is_program(dmidecode) error (bsc#1070344)
+- low: ui_configure: fix for 309d2e, remove "id=" in a safe way
+- low: ra: Don't require deprecated parameters (#321)
+- medium: bootstrap: Missing dmidecode on ppc64le (bsc#1069802)
+- high: crm_rpmcheck: Fix bytes to str encoding error (bsc#1069294)
+- high: bootstrap: Use default IP address for ring0 (bsc#1069142)
+- low: cibconfig: replace etree.tostring with xml_tostring for debug/error messages
+- low: ui_configure: Improve group/clone/ms completer
+- low: bootstrap: Change error/confirm message with specific device name
+- medium: bootstrap: fix init vgfs crash if no "-o device" option
+- medium: bootstrap: fix init storage crash if no value input
+- low: ui_configure: Improve do_clone completer
+- medium: scripts: make sure gfs2 can be configured using hawk(bsc#1067123)
+- low: ui_configure: fix for db4fc62, do not complete if the previous value changed
+- low: utils: convert bytes to str(bsc#1067823)
+- Low: ui_context: Continue completing when input is an alias
+- medium: ui_configure: fix crash when no args given
+- high: utils: Use python3 in util scripts (bsc#1067823)
+- Low: script: Add nginx script for Hawk
+- medium: filter existing args
+- high: bootstrap: Use firewall-offline-cmd for firewalld (bsc#1067498)
+- medium: hb_report: Verify corosync.conf exists before opening it (bsc#1067456)
+- low: config: Collect /var/log/ha-cluster-bootstrap.log (bsc#1067438)
+- medium: bootstrap: Avoid SSH to localhost (bsc#1067324)
+- low: bootstrap: Clarify removal warning
+- low: bootstrap: Avoid printing None instead of NTP service name
+- high: bootstrap: Fix readline error in cluster remove (bsc#1067424)
+- low: cibconfig: use refresh instead of reset after commit
+- high: bootstrap: revert corosync ports for mcast configuration as well (bsc#1066196)
+- high: bootstrap: Revert default corosync port to 5405/5404 (bsc#1066196)
+- low: ui_context: reset term when using the help command + ctrl+C; otherwise, typing ctrl+C while using the help command leaves the terminal with a lost cursor
+- medium: ui_configure: complete for ra actions
+  * append action items which are in the agent's default actions
+  * monitor_Master is mapped to "monitor role=Master"
+  * monitor_Slave is mapped to "monitor role=Slave"
+  * remove action items which are not in the default actions
+  * remove action items which are already used
+  * make sure all default items can be completed
+- medium: ui_configure: Replace compl.null with compl.attr_id where an id is required
+- low: utils: Stop using deprecated functionality
+- medium: ui_context: Stop completing when an id is required
+- low: ui_script: Sort keys when printing JSON
+- Updating the exception type used for catching a missing process id. This change is based on recommendations from @krig regarding handling the base exception class.
+- medium: enable adding a second heartbeat line for unicast. Changes include:
+  1. IPv4 & IPv6 support
+  2. the user should specify a local IP for ringX_addr on both init and join
+  3. user input must be one of the local IP addresses
+  4. peer ringX_addr and local ringX_addr must be in the same network
+- low: bootstrap: change multi-heartbeats as option-mode
+- medium: bootstrap: check init options before running (other option checks can be added here later)
+- medium: ui_corosync: adjust do_push's completer
+- low: ui_corosync: adjust do_pull's output hints
+- medium: ui_corosync: adjust for do_delnode
+  * add a completer to complete the node to delete
+  * limit to udpu transport
+- low: ui_corosync: adjust for do_diff completer
+  * remove node names which are already completed
+  * limit the number of completed nodes to 2
+- low: ui_root: add completers for do_status
+- Update Dockerfile
+- fix exception of xml sort in python3
+- make it compatible with python3
+- This is a proposed fix for bug 283. The fix was to change the except object type to IOError when a directory cannot be found due to a process that has terminated.
+- medium: bootstrap: Only call firewall-cmd if firewalld is active
+- medium: ui_node: node attribute/status-attr is about node attr, not for resources
+- low: ordereddict: Use builtin if available
+- medium: ui_node: node utilization is about node attr, not for resources
+- medium: ui_configure: in modgroup, complete none when removing from a one-ra group
+- medium: ui_configure: in modgroup, complete none after remove
+- medium: ui_configure: in modgroup, add free id and remove id in group
+- low: bootstrap: Clarify messages
+- low: bootstrap: Improve comments / error messages
+- low: bootstrap: Check for firewalld before SuSEfirewall2
+- medium: bootstrap: Add support for chrony
+- Fix SBD configuration when using SBD device (Fixes #235)
+- Detect firewall by checking installed packages
+- medium: ui_resource: start stopped resources and stop started resources
+- low: bootstrap: simplify the code for checking adminIP
+- low: bootstrap: reset the dev value when the dev input is not valid
+- low: bootstrap: catch OSError exceptions instead of crashing
+- low: bootstrap: give error hints when using sbd without a watchdog
+- medium: enable adding a second heartbeat line for mcast. Changes include:
+  1. skip second heartbeat configuration when the number of local networks < 2
+  2. bindnetaddrs must be different and the gap between ports must be larger than 1
+  3. give different default values for the second configuration
+  4. IPv4 & IPv6 support
+- fix: bootstrap: remove cib.xml before corosync.add_node
+- medium: bootstrap: adminIP should not exist before configuration
+- medium: bootstrap: adminIP should not be the network address itself
+- low: main: add hostname in promptstr
+- fix: utils.list_cluster_nodes: use "crm_node -l" first
+- medium: utils: list_cluster_nodes: read nodes list from cib.xml
+- low: ui_node: return False when cibdump2elem returns None
+- low: ui_cluster: strip "None" when cmd stdout is None
+- medium: bootstrap: validate adminIP. The administration virtual IP must:
+  1) have a valid IPv4/IPv6 format
+  2) belong to one of the local networks
+- medium: utils: extend "IP/Network" codes for IPv4/IPv6 both
+- medium: bootstrap: use a strict regex and validation function for bindnetaddr/mcastaddr
+- low: bootstrap: when a node is joining, it will take a while after init_cluster_local
+- low: bootstrap: join_csync2 may take a long while, so use status_long and status_done to give hints
+- medium: bootstrap: configure with IPv6 (unicast)
+- medium: bootstrap: configure with IPv6 (mcast). Changes include:
+  1) use the "-I" option to stand for IPv6 in ui_cluster
+  2) totem.ip_version should be set to "ipv6"
+  3) choose an interface which has an IPv6 address for the default values
+  4) each node must have a unique nodeid when using IPv6; the nodeid is derived from the IPv6 address on the default interface
+  5) the IPv6 network computation is learned from https://github.com/tehmaze/ipcalc
+- high: bootstrap: expected votes wouldn't update in unicast mode
+- medium: bootstrap: disable completion and history when running bootstrap
+- low: ui_cluster: changing the cluster name requires restarting the cluster service
+- low: ui_cluster: Add two new lines before add a new node
+- medium: bootstrap: run "csync2_update" for all files after new joining node call csync2_remote
+- low: config: add "%s" in format
+- low: ui_cluster: run command on a specific node
+- low: bootstrap: color the input error message
+- low: bootstrap: suppress the error message
+- low: bootstrap: Improve message when sbd is not installed (bsc#1050427)
+- low: bootstrap: Don't rely on ha-cluster-* shortcuts
+- low: completers: filter out ms resource when doing promote/demote
+- Add missing ')'
+- medium: ui_cluster: use get_property instead of get_property_w_default
+- low: ui_cluster: show cluster name in cluster status command
+- low: help: adjust the help print width: widen the gap between a cmd and its short description for long cmd names
+- low: command: a clearer and better replacement for commit 4ac8d4 (Improved cd completion)
+  * code changes happen only in the command:_cd_completer function
+  * at the root level, complete sublevel names after "cd "
+  * at a sublevel, complete other sublevel paths after "cd ../"
+  * complete sublevel names if this sublevel also has sublevels of its own
+  * prevent '..' from completing after every tab
+- low: ui_cluster: complete a node name only once, or the same node name will be completed many times with repeated "Tab"s
+- low: ui_cluster: when optparse reports an error, just return and stay in the shell
+- low: ui_cluster: when the help option is used, do not exit; just print the help messages and return
+- low: doc: add cluster rename info
+- medium: ui_cluster: Add cluster rename command
+  * Update /etc/corosync/corosync.conf with the new name on all nodes
+  * Change the cluster-name property in the CIB
+  * Reload the corosync configuration on all nodes (use corosync-cmapctl)
+- medium: bootstrap: add callback function to check valid port range
+- low: bootstrap: give a confirmation message when removing a node
+- medium: bootstrap: replace 'nodename' with 'seed_host'; when given a node name which is not configured in the cluster, the program would crash with "UnboundLocalError: local variable 'nodename' referenced before assignment"
+- low: ui_cluster: add "delete" alias for "remove" option
+- low: scripts: health: save the health-report when running "crm cluster health"
+- medium: command: adjust the 'ls' print width for long options like 'crm cluster geo-init-arbitrator'
+- low: doc: Add help info for the cluster enable/disable option
+- medium: ui_cluster: Add enable/disable option to enable/disable pacemaker service
+- medium: bootstrap: Revert to --clusters as argument name
+- low: doc: Correct minor mistakes in documentation
+- medium: bootstrap: Rename --clusters to --sites (bsc#1047074)
+- low: doc: Clarify --cluster-node parameter to geo-join
+- medium: ui_ra: Improve resource agents completion
+- medium: ui_context: Make all the options completable. Currently, not all options can be completed by 'Tab', for example:
+  crm->configure->verify, crm->configure->back, crm->configure->master, crm->cluster->init.
+  Maybe because the subcommand doesn't have a completer yet; maybe because the subcommand itself is an alias of another command.
+- Remove "up/back/end" option at root level
+- Improve ls command outputs:
+  * sorted outputs
+  * print results column by column, like other completion results
+- Improved cd completion:
+  * on the root level, complete sublevel names after "cd "
+  * on the sublevel, complete other sublevel paths after "cd ../"
+  * prevent '..' from completing after every tab
+- medium: bootstrap: Fix watchdog SBD envvars (bsc#1045118)
+- medium: scripts: Relax broadcast IP validation (bsc#1044233)
+- medium: scripts: Clarify help text for NFS wizard (bsc#1044244)
+- high: bootstrap: Add option to enable diskless SBD mode to cluster init (bsc#1045118)
+- remove _keywords/_objects/_regtest/_test options from tab completion
+- low: bootstrap: Fall back to logging into $TMPDIR/ha-cluster-bootstrap.log
+- medium: ui_cluster: Add --force to ha-cluster-remove (bsc#1044071)
+- medium: history: Revert preference of messages over ha-log.txt (bsc#1031138)
+- Doc: add rules in operations example
+- Test: add tests for rules in operations
+- Fix: apply new cliformat to regression tests
+- Feature: add rules support to operations
+- medium: Add support for pacemaker PR#1208
+- doc: Document lifetime parameter format
+- medium: bootstrap: Make arbitrator argument optional (bsc#1038386)
+- doc: geo-join requires --clusters argument (bsc#1037442)
+- medium: bootstrap: Check required arguments to geo-join (bsc#1037421)
+- medium: bootstrap: Handle failure to fetch config gracefully (bsc#1037423)
+- medium: bootstrap: Enable "help geo-init" etc. (bsc#1037417)
+- high: cibconfig: Graph file output option was reversed (bsc#1036595)
+- medium: scripts/health: Make health script available as wizard (fate#320848) (fate#320866)
+- lsb, service, stonith and systemd don't have any providers, so a provider shouldn't be completed when typing 'tab' after these ra classes.
+- remove bindnetaddr for unicast(bsc#1030437)
+- medium: bootstrap: Set expected_votes based on actual node count (bsc#1033288)
+- low: bootstrap: Fix formatting of confirmation prompt (bsc#1028704)
+- low: utils: Use /proc for process discovery
+- medium: ui_cluster: Fix init with no arguments (bsc#1028735)
+- low: bootstrap: Fix warning for formatting SBD device (bsc#1028704)
+- low: utils: is_process did not work
+- Allow empty fencing_topology (bsc#1025393)
+- doc: Bootstrap howto guide
+
+* Fri Jul 21 2017 Kristoffer Grönlund <kgronlund@suse.com> and many others
+- Release 3.0.1
+- low: ui_cluster: when optparse reports an error, just return and stay in the shell
+- low: ui_cluster: when the help option is used, do not exit; just print the help messages and return
+- medium: bootstrap: replace 'nodename' with 'seed_host'; when given a node name which is not configured in the cluster, the program would crash with "UnboundLocalError: local variable 'nodename' referenced before assignment"
+- medium: bootstrap: Revert to --clusters as argument name
+- Add missing ')'
+- medium: bootstrap: Rename --clusters to --sites (bsc#1047074)
+- medium: bootstrap: Fix watchdog SBD envvars (bsc#1045118)
+- medium: scripts: Relax broadcast IP validation (bsc#1044233)
+- medium: scripts: Clarify help text for NFS wizard (bsc#1044244)
+- high: bootstrap: Add option to enable diskless SBD mode to cluster init (bsc#1045118)
+- medium: ui_cluster: Add --force to ha-cluster-remove (bsc#1044071)
+- medium: history: Revert preference of messages over ha-log.txt (bsc#1031138)
+- medium: bootstrap: Make arbitrator argument optional (bsc#1038386)
+- doc: geo-join requires --clusters argument (bsc#1037442)
+- medium: bootstrap: Check required arguments to geo-join (bsc#1037421)
+- medium: bootstrap: Handle failure to fetch config gracefully (bsc#1037423)
+- medium: bootstrap: Enable "help geo-init" etc. (bsc#1037417)
+- high: cibconfig: Graph file output option was reversed (bsc#1036595)
+- medium: bootstrap: Set expected_votes based on actual node count (bsc#1033288)
+- remove bindnetaddr for unicast(bsc#1030437)
+- medium: scripts/health: Make health script available as wizard (fate#320848) (fate#320866)
+- low: utils: Use /proc for process discovery
+- medium: ui_cluster: Fix init with no arguments (bsc#1028735)
+- low: bootstrap: Fix warning for formatting SBD device (bsc#1028704)
+- Allow empty fencing_topology (bsc#1025393)
+
+* Tue Jan 31 2017 Kristoffer Grönlund <kgronlund@suse.com> and many others
+- Release 3.0.0
+- high: bootstrap: Add bootstrap commands (fate#321114)
+- high: logparser: Update transition RE (#168)
+- high: scripts: Remove script versions of add/remove/init
+- high: utils: Fix typo in tmpf patch (bsc#999683)
+- medium: bootstrap: Configure hawk iff package installed
+- medium: ui_cluster: Fix broken cluster remove command
+- medium: bootstrap: Invoke _remote commands correctly
+- medium: bootstrap: adapt firewall handling to other platforms
+- medium: ui_cluster: Compatibility mode for old cluster init behavior
+- medium: ui_history: Avoid ugly wrapping of diff output
+- medium: hb_report: Make sure this never expands to rm -rf /
+- medium: hb_report: don't use backticks in local
+- low: completers: give op hints when typing tab after 'op'
+- low: bootstrap: Handle None as result from remote command correctly
+- low: bootstrap: Avoid warning if known_hosts doesn't exist
+- low: bootstrap: Don't check for ptty for _remote stages
+- low: ui_cluster: No need to check the cluster stack in requires
+- low: ui: Fix vim highlighting support.
+- low: ui_script: Fix script list all/names argument handling
+- low: cibconfig: Clearer error for duplicate ID (bsc#1009748)
+- low: ui_cluster: start/stop don't touch corosync, just pacemaker
+
+* Tue Oct 25 2016 Kristoffer Grönlund <kgronlund@suse.com> and many others
+- Release 2.3.2
+- high: history: Quote archive tarball name if it contains spaces (bsc#998959)
+- high: history: Prefer /var/log/messages over ha-log.txt (bsc#998891)
+- high: parse: Support target pattern in fencing topology
+- high: cibconfig: Ensure temp CIB is readable by crm_diff (bsc#999683)
+- medium: corosync: Fix missing variable in del-node
+- medium: scripts: Drop logrotate check from cluster health
+- medium: scripts: Better corosync defaults (bsc#1001164)
+- medium: cibconfig: Remove from tags when removing object
+- medium: ui_configure: option to obscure passwords
+- low: cmd_status: More detail in verify output
+- low: crm_pssh: Fix nodenum envvar name
+- low: cmd_status: Highlight plural forms (bsc#996806)
+- doc: Fix inverted boolean in resource set documentation
+
+* Fri Sep 2 2016 Kristoffer Grönlund <kgronlund@suse.com> and many others
+- Release 2.3.1
+- Require Python 2.6+, not 2.7 (bsc#995611) (#152)
+
+* Fri Aug 12 2016 Kristoffer Grönlund <kgronlund@suse.com> and many others
+- Release 2.3.0
+- medium: constants: Add missing alerts constants (#150) (bsc#992789)
+- high: hb_report: Don't collect logs from journalctl if -M is set (bsc#990025)
+- high: hb_report: Skip lines without timestamps in log correctly (bsc#989810)
+- low: scripts: Fix use of non-relative import for ra
+- medium: tmpfiles: Create temporary directory if non-existing (bsc#981583)
+- medium: xmlutil: reduce unknown attribute to warning (bsc#981659)
+- high: constants: Add maintenance to set of known attributes (bsc#981659)
+- medium: scripts: no-quorum-policy=ignore is deprecated (bsc#981056)
+- low: history: fall back to any log file in report root
+- medium: history: Report better error when history user is not sudoer (bsc#980924)
+- high: history: Store live report in per-user directory (bsc#980924)
+- medium: logparser: Fix use-before-declaration error in logparser
+- low: utils: Clearer error if permission denied when locking (bsc#980924)
+- medium: logparser: Handle read-only access to metadata cache (bsc#980924)
+- doc: Add @steenzout to AUTHORS
+- fix issue #144 by comparing output line by line (#146)
+- fixed version number (#142)
+- added crm to scripts (#143)
+- doc: sort subcommands in the documentation
+- high: parse: Support for event-driven alerts (fate#320855) (#136)
+- medium: ui_resource: Add force argument to resource cleanup (bsc#979420)
+- high: ui_resource: Improved resource move/clear/locate commands
+- medium: ui_resource: Show utilization in output from crm resource scores
+- high: utils: Avoid deadlock if DC changes during idle wait (bsc#978480)
+- low: scripts: Note SBD recommendation in libvirt script (fate#318320)
+- low: scripts: Note SBD recommendation in vmware script (fate#318320)
+- high: ui_root: Add crm verify command
+- low: hb_report: Fix spurious error on missing events.txt
+- medium: hb_report: Fix broken -S option (#137)
+- medium: ui_node: Fix crash in node fence command (bsc#974902)
+- low: scripts: Better description for sbd
+- low: scripts: Preserve formatting in description for vmware wizard
+- medium: scripts: Add vmware to data manifest (fate#318320)
+- high: scripts: VMware fencing using vCenter (fate#318320)
+- low: scripts: Shouldn't set -e here (fate#318320)
+- low: parse: Don't validate operation name in parser (bsc#975357)
+- low: constants: Add missing reload operation to parser
+- medium: ui_node: Fix "crm node fence" (bsc#974902) (#134)
+- low: corosync: Recycle node IDs when possible
+- low: scripts: Fix watchdog test in sbd-device (fate#318320)
+- low: scripts: Only print debug output locally unless there were remote actions
+- low: cibconfig: Don't mix up CLI name with XML tag
+- low: parser: ignore case for attr: prefix
+- medium: scripts: Use os.uname() to find hostname (#128)
+- low: history: Don't skip nodes without logs
+- low: logparser: Don't crash on nodes without logs
+- low: scripts: Need sudo if non-local call
+- medium: hb_report: Add timeout to SSH connection (bsc#971690)
+- low: scripts: Clean up various scripts
+- medium: main: Add -o|--opt to pass extra options for crmsh
+- low: command: handle stray regex characters in input
+- medium: scripts: SBD wizard which configures SBD itself (fate#318320)
+- medium: scripts: Add nfs-utils to list of packages for nfsserver
+- medium: scripts: Set sudo and full path for exportfs -v in nfs scripts
+- medium: scripts: Don't require sudo for root
+- medium: scripts: inline scripts for call actions
+- medium: scripts: Simplify SBD script (bsc#968076) (fate#318320)
+- low: logparser: Add cib info to __meta for hawk
+- low: hb_report: Suggest user checks timeframe on empty logs (bsc#970823)
+- medium: ui_node: Add crm node server command
+- medium: hb_report: Use server attribute for remote nodes if set (bsc#970819)
+- low: ui_resource: alias show to get
+- high: history: Faster log parsing (bsc#920278)
+- low: log_patterns_118: Add captures to log patterns for tagging (bsc#970278)
+- medium: crm_pssh: Fix live refresh of journalctl logs (bsc#970931)
+- low: hb_report: Warn if generated report is empty (bsc#970823)
+- low: hb_report: Print covered time span at exit (bsc#970823)
+- low: logtime: Improve performance of syslog_ts (bsc#970278)
+- low: scripts: Fix error in service action
+- low: history: use os.listdir to list history sessions
+- medium: ui_node: Use stonith_admin -F to fence remote nodes (bsc#967907)
+- low: ui_node: Less cryptic query when fencing node
+- low: config: Messed up previous fix (#119)
+- low: config: Clean up libdir configuration (#119)
+- medium: config: make multiarch dependency a dynamic include (#119)
+- high: ui_configure: Fix commit force (#120)
+- medium: hb_report: Don't collect logs on non-nodes (bsc#959031)
+- medium: ui_configure: Only wait for DC if resources were stopped (#117)
+- low: Fix title style vs. sentence style in cluster scripts (bsc#892108)
+- medium: command: Disable fuzzy matcher for completion (#116)
+- Merge pull request #115 from rikkotec/patch-queue/remove-fix-for-debian
+- medium: corosync: added optional parameter [name] to "corosync add-node" function
+- medium: constants: clone-min meta attribute (new in Pacemaker 1.1.14)
+- medium: cibconfig: add and|or filter combinators to influence filtering (fate#320401)
+- high: scripts: fix broken cluster init script (bsc#963135)
+- high: scripts: Add LVM on DRBD cluster script (bsc#951132)
+- high: scripts: Add NFS on LVM and DRBD cluster script (bsc#951132)
+- medium: ui_configure: Rename show-property to get-property
+- high: scripts: Improved OCFS2 cluster script (bsc#953984)
+- medium: scripts: Updated SBD cluster script
+- high: history: Parse log lines without timestamp (bsc#955581)
+
+* Fri Jan 15 2016 Kristoffer Grönlund <kgronlund@suse.com> and many others
+- Release 2.2.0
+- medium: history: Fix live report refresh (bsc#950422) (bsc#927414)
+- medium: history: Ignore central log
+- medium: cibconfig: Detect false container children
+- low: clidisplay: Avoid crash when colorizing None
+- medium: scripts: Load single file yml scripts
+- medium: scripts: Reformat scripts to simplified form
+- medium: ui_history: Add events command (bsc#952449)
+- low: hb_report: Drop function from event patterns
+- high: cibconfig: Preserve failure through edit (bsc#959965)
+- high: cibconfig: fail if new object already exists (bsc#959965)
+- medium: ui_cib: Call crm_shadow in batch mode to avoid spawning subshell (bsc#961392)
+- high: cibconfig: Fix XML import bug for cloned groups (bsc#959895)
+- high: ui_configure: Move validate-all validation to a separate command (bsc#956442)
+- high: scripts: Don't require scripts to be an array of one element
+- medium: scripts: Enable setting category in legacy wizards (bnc#957926)
+- high: scripts: Don't delete steps from upgraded wizards (bnc#957925)
+- high: ra: Only run validate-all if current user is root
+- high: cibconfig: Call validate-all action on agent in verify (bsc#956442)
+- high: script: Fix issues found in cluster scripts
+- high: ui_ra: Add ra validate command (bsc#956442)
+- low: resource: Fix unban alias for unmigrate
+- high: ui_resource: Add constraints and operations commands
+- high: ui_resource: Enable start/stop/status for multiple resources at once (bsc#952775)
+- high: scripts: Conservatively verify scripts that modify the CIB (bsc#951954)
+- high: xmlutil: Order is significant in resource_set (bsc#955434)
+- medium: scripts: Lower copy target to string
+- doc: configure load can read from stdin
+- medium: script: (filesystem) create stopped (bsc#952670)
+- medium: scripts: Check required parameters for optional sub-steps
+- high: scripts: Eval CIB text in correct scope (bsc#952600)
+- medium: utils: Fix python 2.6 compatibility
+- medium: ui_script: Tag legacy wizards as legacy in show (bsc#952226)
+- medium: scripts: No optional steps in legacy wizards (bsc#952226)
+- high: utils: Revised time zone handling (bsc#951759)
+- high: report: Fix syslog parser regexps (bsc#951759)
+- low: constants: Tweaked graph colours
+- high: scripts: Fix DRBD script resource reference (bsc#951028)
+- low: constants: Tweaked graph colors
+- medium: report: Make transitions without end stretch to 2525
+- high: utils: Handle time zones in parse_time (bsc#949511)
+- medium: hb_report: Remove reference to function name in event patterns (bsc#942906)
+- medium: ui_script: Optionally print common params
+- medium: cibconfig: Fix sanity check for attribute-based fencing topology (#110)
+- high: cibconfig: Fix bug with node/resource collision
+- high: scripts: Determine output format of script correctly (bsc#949980)
+- doc: add explanatory comments to fencing_topology
+- doc: add missing backslash in fencing_topology example
+- doc: add missing <> to fencing_topology syntax
+- low: don't use deprecated crm_attribute -U option
+- doc: resource-discovery for location constraints
+- high: utils: Fix cluster_copy_file error when nodes provided
+- low: xmlutil: More informative message when updating resource references after rename
+- doc: fix some command syntax grammar in the man page
+- high: cibconfig: Delete constraints before resources
+- high: cibconfig: Fix bug in is_edit_valid (bsc#948547)
+- medium: hb_report: Don't cat binary logs
+- high: cibconfig: Allow node/rsc id collision in _set_update (bsc#948547)
+- low: report: Silence tar warning on early stream close
+- high: cibconfig: Allow nodes and resources with the same ID (bsc#948547)
+- high: log_patterns_118: Update the correct set of log patterns (bsc#942906)
+- low: ui_resource: Silence spurious migration non-warning from pacemaker
+- medium: config: Always fall back to /usr/bin:/usr/sbin:/bin:/sbin for programs (bsc#947818)
+- medium: report: Enable opening .xz-compressed report tarballs
+- medium: cibconfig: Only warn for grouped children in colocations (bsc#927423)
+- medium: cibconfig: Allow order constraints on group children (bsc#927423)
+- medium: cibconfig: Warn if configuring constraint on child resource (bsc#927423) (#101)
+- high: ui_node: Show remote nodes in crm node list (bsc#877962)
+- high: config: Remove config.core.supported_schemas (bsc#946893)
+- medium: report: Mark transitions with errors with a star in info output (bsc#943470)
+- low: report: Remove first transition tag regex
+- medium: report: Add transition tags command (bsc#943470)
+- low: ui_history: Better error handling and documentation for the detail command
+- low: ui_history: Swap from and to times if to < from
+- medium: cibconfig: XML parser support for node-attr fencing topology
+- medium: parse: Updated syntax for fencing-topology target attribute
+- medium: parse: Add support for node attribute as fencing topology target
+- high: scripts: Add enum type to script values
+- low: scripts: [MailTo] install mailx package
+- low: scripts: Fix typo in email type verifier
+- high: script: Fix subscript agent reference bug
+- low: constants: Add meta attributes for remote nodes
+- medium: scripts: Fix typo in lvm script
+- high: scripts: Generate actions for includes if none are defined
+- low: scripts: [virtual-ip] make lvs_support an advanced parameter
+- medium: crm_pssh: Timeout is an int (bsc#943820)
+- medium: scripts: Add MailTo script
+- low: scripts: Improved script parameter validation
+- high: parse: Fix crash when referencing score types by name (bsc#940194)
+- doc: Clarify documentation for colocations using node-attribute
+- high: ui_script: Print cached errors in json run
+- medium: scripts: Use --no option over --force unless force: true is set in the script
+- medium: options: Add --no option
+- high: scripts: Default to passing --force to crm after all
+- high: scripts: Add force parameter to cib and crm actions, and don't pass --force by default
+- low: scripts: Make virtual IP optional [nfsserver]
+- medium: scripts: Ensure that the Filesystem resource exists [nfsserver] (bsc#898658)
+- medium: report: Reintroduce empty transition pruning (bsc#943291)
+- low: hb_report: Collect libqb version (bsc#943327)
+- medium: log_patterns: Remove reference to function name in log patterns (bsc#942906)
+- low: hb_report: Increase time to wait for the logmark
+- high: hb_report: Always prefer syslog if available (bsc#942906)
+- high: report: Update transition edge regexes (bsc#942906)
+- medium: scripts: Switch install default to false
+- low: scripts: Catch attempt to pass dict as parameter value
+- high: report: Output format from pacemaker has changed (bsc#941681)
+- high: hb_report: Prefer pacemaker.log if it exists (bsc#941681)
+- medium: report: Add pacemaker.log to find_node_log list (bsc#941734)
+- high: hb_report: Correct path to hb_report after move to subdirectory (bsc#936026)
+- low: main: Bash completion didn't handle sudo correctly
+- medium: config: Add report_tool_options (bsc#917638)
+- high: parse: Add attributes to terminator set (bsc#940920)
+- Medium: cibconfig: skip sanity check for properties other than cib-bootstrap-options
+- medium: ui_script: Fix bug in verify json encoding
+- low: ui_script: Check JSON command syntax
+- medium: ui_script: Add name to action output (fate#318211)
+- low: scripts: Preserve formatting of longdescs
+- low: scripts: Clearer shortdesc for filesystem
+- low: scripts: Fix formatting for SAP scripts
+- low: scripts: add missing type annotations to libvirt script
+- low: scripts: make overridden parameters non-advanced by default
+- low: scripts: Tweak description for libvirt
+- low: scripts: Strip shortdesc for scripts and params
+- low: scripts: Title and category for exportfs
+- high: ui_script: drop end sentinel from API output (fate#318211)
+- low: scripts: Fix possible reference error in agent include
+- low: scripts: Clearer error message
+- low: Remove build revision from version
+- low: Add HAProxy script to data manifest
+- medium: constants: Add 'provides' meta attribute (bsc#936587)
+- medium: scripts: Add HAProxy script
+- high: hb_report: find utility scripts after move (bsc#936026)
+- high: ui_report: Move hb_report to subdirectory (bsc#936026)
+- high: Makefile: Don't install hb_report using data-manifest (bsc#936026)
+- medium: report: Fall back to cluster-glue hb_report if necessary (bsc#936026)
+- medium: scripts: stop inserting comments as values
+- high: scripts: subscript values not required if subscript has no parameters / all defaults (fate#318211)
+- medium: scripts: Fix name override for subscripts (fate#318211)
+- low: scripts: Clean up generated CIB (fate#318211)
+
+* Sat Jun 13 2015 Kristoffer Grönlund <kgronlund@suse.com> and many others
+- Pre-release 2.2.0-rc3
+- high: Merge rewizards development branch (fate#318211)
+ (fate#318384) (fate#318483) (fate#318482) (fate#318550)
+
+- Summary of some of the changes included in the merge of
+ the rewizards branch:
+
+ + Colorized status output
+ + New and more capable cluster script implementation
+ + Deprecated the crmsh templates (not the CIB templates,
+ the configuration templates)
+ + Implemented a JSON API interface to the cluster scripts
+ for hawk to use instead of having its own wizards
+ + Handlebars-like templating language for cluster scripts
+ that modify the CIB
+ + Collect metadata from resource agents to avoid duplication
+ in configuration scripts
+ + Extended validation support for parameter values
+ + New cluster scripts:
+
+ - Stonith: SBD and libvirt
+ - Apache web server
+ - NFS server
+ - cLVM
+ - Databases: MySQL / MariaDB / Oracle / DB2
+ - SAP
+ - OCFS2
+ - etc.
+
+ + Radically simplified automake and autoconf setup
+ + Improved completion performance
+ + Added Pygments lexers used by the history guide as a stand-alone
+ Python module in contrib/
+ + Removed dependency on corosync for regression test suite
+ + Sort topics and commands in help output
+ + Hide internal commands in help and ls
+ + Clearer debug output when simulating
+ + Cleaned up and fixed documentation bugs
+
+- high: cmd_status: Colorize status output
+- low: cmd_status: Add full argument to status
+- low: scripts: Handle local runs even if nodelist doesn't contain local node
+- low: scripts: Stricter regexp for identifiers
+- doc: Fix unterminated block
+- low: command: Hide internal commands from ls
+- low: script: Rename describe to show
+- doc: Document the script JSON API
+- low: handles: Clean up special values
+- medium: help: Sort topics and commands in help output
+- doc: scripts: Basic documentation for the cluster scripts
+- doc: Describe website compilation process in development.md
+- contrib: Add Pygments lexers used by the history guide
+- build: Add update-data-manifest.sh to generate datadir file list
+- medium: ui_script: Add JSON API
+- medium: config: add config.path.hawk_wizards
+- medium: handles: Fix error in strict parameter handling
+- scripts: Add placeholders for some basic scripts
+- WIP: in-progress notes etc.
+- doc: Update reference to parallax in scripts documentation
+- low: handles: Also allow # and $ in identifiers
+- medium: handles: Replace magic value with callables
+- medium: handles: {{^feature}}invert blocks{{/feature}}
+- medium: resource: Add ban command
+- medium: ui_root: Make the cibstatus command available directly from the root
+- medium: hb_report: Collect logs from pacemaker.log
+- low: crm: Detect and report use of python 3
+- doc: Link to japanese translation of Getting Started
+- medium: crm_pkg: Fix cluster init bug on RH-based systems
+- medium: crm_gv: Improved quoting of non-identifier node names (bsc#931837)
+- medium: crm_gv: Wrap non-identifier names in quotes (bsc#931837)
+- low: Fix references to pssh to refer to parallax
+- medium: report: Try to load source as session if possible (bsc#927407)
+- low: xmlutil: Update comment to match the code
+- Merge pull request #91 from krig/missing-transitions
+- high: report: New detection to fix missing transitions (bnc#917131)
+- medium: ui_configure: Add resource as an alias for primitive
+- medium: parse: Allow implicit initial for groups as well
+- medium: parse: More robust implicit initial parser
+- doc: website: Embedded hawk video in announcement
+- doc: news: News update for 2.1.4
+- Merge pull request #95 from dmuhamedagic/history-guide
+- Medium: doc: add history guide
+- Low: doc: simplify to make it work with python 2.6
+- Medium: hb_report: use faster zypper interface if available
+- medium: ui_configure: Wait for DC when removing running resource
+- Merge pull request #94 from rikkotec/patch-queue/debian-multiarch-compat
+- Fix CFLAGS for supporting triplet paths with pacemaker
+- low: schema: Don't leak PacemakerError exceptions (#93)
+- high: ui_cluster: Add copy command
+- doc: Update the documentation for the upgrade command
+- parse: Don't require trailing colon in tag definitions
+- high: crm_pssh: Explicitly set parallax inline option (krig/parallax#1)
+- doc: Add quick links to website
+- high: ui_configure: Add show-property command
+- medium: utils: Allow 1/0 as boolean values for parameters
+- doc: Correct the URL to point to the new upstream repository
+- doc: Add announcement for release 2.1.3
+- low: hb_report: Use crmsh config to find pengine/cib dirs (bsc#926377)
+- low: ui_options: add alias list for show
+- medium: cliformat: Escape double-quotes in nvpair values
+- high: parse: Don't allow constraints without applicants
+- medium: parse: Disallow location rules without resources
+- medium: ui_template: Make new command more robust (bnc#924641)
+- high: fix typo in previous commit
+- high: ui_node: Don't fence node in clearstate (boo#912919)
+- low: Replaced README with README.md
+- medium: ui_template: Always generate id unless explicitly defined (boo#921028)
+- high: cibconfig: Derive id for ops from referenced resource name (boo#921028)
+- medium: templates: Clearer descriptions for editing templates (boo#921028)
+- high: ui_context: Wait for DC after commit, not before (#85)
+- high: cibconfig: Don't delete valid tickets when removing referenced objects (bnc#922039)
+- high: ui_configure: Remove acl_group command (bnc#921056)
+- doc: Document changes to template list|new
+- medium: help: Teach help to fuzzy match topics
+- doc: Describe the shorthand syntax for commands
+- low: command: Use fuzzy match for sublevel check
+- medium: command: Fuzzy match command names
+- low: ui_context: Use true command name when reporting errors
+- doc: Move the main crmsh repository to the ClusterLabs organization on github
+- Merge pull request #82 from dmuhamedagic/sync_hb_report
+- Low: hb_report: add -X option for extra ssh options
+- Merge pull request #81 from lge/for-krig
+- fix: catch exception if schema file does not exist
+- low: allow pacemaker 1.0 version detection
+- low: allow (0,1) as option booleans
+- medium: cibconfig: Allow removal of non-existing elements if --force is set
+- medium: cibconfig: Allow delete of objects that don't exist without returning error code
+- medium: cibconfig: If a change results in no diff, exit silently
+- low: pacemaker: Remove debug output
+- medium: schema: Remove extra debug output
+- medium: schema: Test if node type is optional via schema
+- medium: parse: Treat pacemaker-next schema as 2.0+
+- low: cibconfig: Improved debug output when schema change fails
+- medium: cibconfig: Fix inverted logic causing spurious warning
+- Merge pull request #80 from dmuhamedagic/schema-update
+- Medium: cibconf: preserve cib user attributes
+- medium: ra: Handle non-OCF agent meta-data better
+- medium: config: Fix case-sensitivity for booleans
+- medium: report: Include transitions with configuration changes (bnc#917131)
+- medium: xmlutil: Improved check for related elements
+- doc: Documentation for show related:<obj>
+- medium: report: Convert RE exception to simpler UI output
+- medium: cibconfig: add show related:<obj>
+- doc: Add link to clusterlabs.org
+- medium: parse: Encode unicode using xmlcharrefreplace in parser
+- medium: parse: nvpair attributes with no value = <nvpair name=".."/> (#71)
+- medium: ui_cluster: Add diff command (bnc#914525)
+- doc: website: Fix changelog in news entry
+- doc: website: Add news release for 2.1.2
+- medium: report: Fall back to end_ts = start_ts
+- medium: util: Don't fall back to current time
+- high: xmlutil: Treat node type=member as normal (boo#904698)
+- low: xmlutil: logic bug in sanity_check_nvpairs
+- medium: xmlutil: Modify sort order of object types
+- medium: cibconfig: Use orderedset to avoid reordering bugs (#79)
+- medium: orderedset: Add OrderedSet type
+- medium: cibconfig: Detect v1 format and don't patch container changes (bnc#914098)
+- medium: constants: Update transition regex (#77)
+- Revert "high: xmlutil: Reorder elements only if sort_elements is set (#78)"
+- low: ui_options: Add underscore aliases for legacy options
+- high: xmlutil: Reorder elements only if sort_elements is set (#78)
+- medium: cibconfig: Strip digest from v1 diffs (bnc#914098)
+- Merge pull request #77 from krig/mail-patchset
+- medium: crm_pssh: Make tar follow symlinks
+- medium: constants: Fix transition start detection
+- medium: crm_pssh: Handle incomplete Option argument
+- high: crm_pssh: Use correct Task API in do_pssh (bnc#913261)
+- medium: cibconfig: Break infinite edit loop if --force is set
+- Merge pull request #76 from dmuhamedagic/log-patterns
+- high: utils: Locate binaries across sudo boundary (bnc#912483)
+- low: config: Convert NoOptionError to ValueError
+- low: msg: Add note on modifying supported schemas
+- medium: config: Add 2.3 to list of supported schemas
+- medium: utils: crm_daemon_dir is added to PATH in envsetup (#67)
+
+* Fri Jan 9 2015 Kristoffer Grönlund <kgronlund@suse.com> and many others
+- medium: ui_resource: Set probe interval 0 if not set (bnc#905050)
+- doc: Document probe op in resource trace (bnc#905050)
+- low: ui_resource: --reprobe and --refresh are deprecated (bnc#905092)
+- doc: Document deprecation of refresh and reprobe (bnc#905092)
+- medium: parse: Support resource-discovery in location constraints
+- medium: pacemaker: Support pacemaker-next as schema
+- medium: cibconfig: Allow unsupported schemas with warning
+- medium: ra: Use correct path for crmd (#67)
+- medium: cmd_status: Show pending if available, enable extra options
+- high: config: Fix path to system-wide crm.conf (#67)
+- medium: config: Fall back to /etc/crm/crmsh.conf (#67)
+- low: cliformat: Colorize id: as identifier (boo#905338)
+- medium: cibconfig: Revised CIB schema handling
+- medium: ui_configure: Add replace option to commit
+- medium: cibconfig: Don't bump epoch if stripping version
+- medium: ui_context: Lazily import readline
+- medium: ui_configure: selectors in save command
+- medium: config: Add core.ignore_missing_metadata (#68) (boo#905910)
+- Medium: config: add alwayscolor to display output option
+- doc: Clarify documentation for property (boo#905637)
+- doc: Add documentation section describing rule expressions (boo#905637)
+- doc: Link to documentation on rule expressions
+- medium: Allow removing groups even if is_running (boo#905271)
+- medium: cibconfig: Delete containers first in edits (boo#905268)
+- doc: Improved documentation for show and save
+- doc: Add note about modeline for vim syntax
+- medium: ui_history: Fix crash using empty object set
+- utils: append_file: open destination in append-mode (boo#907528)
+- medium: parse: Allow nvpair with no value using name= syntax (#71)
+- medium: parse: Enable name[=value] for nvpair (#71)
+- Low: term: get rid of annoying ^O in piped-to-less-R output
+- high: parse: Implicit initial parameter list
+- high: crm_pssh: Switch to python-parallax over pssh (bnc#905116)
+- low: report: Fix references to PSSH
+- low: report: Delay Report creation until use
+- medium: utils: Check if path basename is less (#74)
+- medium: ui_options: Accept prefix or suffix of option as argument
+- medium: Remove CIB version in case of --no-version.
+- low: cibconfig: Use LXML to remove version data more robustly (#75)
+- low: crm_gv: Avoid crashing if passed None in my_edge
+- low: cibconfig: Protect against dereferencing None when building graph
+
+* Tue Oct 28 2014 Kristoffer Grönlund <kgronlund@suse.com> and many others
+- Pre-release 2.2.0-rc1
+- cibconfig: Clean up output from crm_verify (bnc#893138)
+- high: constants: Add acl_target and acl_group to cib_cli_map (bnc#894041)
+- medium: cibconfig: Add set command
+- doc: Rename asciidoc files to %.adoc
+- high: parse: split shortcuts into valid rules
+- medium: Handle broken CIB in find_objects
+- high: scripts: Handle corosync.conf without nodelist in add-node (bnc#862577)
+- low: template: Add 'new <template>' shortcut
+- low: ui_configure: add rm as alias for delete
+- low: ui_template: List both templates and configs by default
+- medium: config: Assign default path in all cases
+- low: main: Catch any ValueErrors that may leak through
+- doc: Update TODO
+- low: corosync: Check tools before use
+- low: ui_ra: Don't crash when no OCF agents installed
+- low: ra: Add systemd-support to RaOS
+- doc: Updated documentation
+- doc: Handle command names with underscore
+- doc: Add tool to sort command list in documentation
+- doc: Sort command list in documentation alphabetically
+- high: cibconfig: Generate valid CLI syntax for attribute lists (bnc#897462)
+- high: cibconfig: Add tag:<tag> to get all resources in tag
+- low: report: Sort list of nodes
+- low: ui_cluster: More informative error message
+- low: main: Replace getopt with optparse
+- high: parse: Allow empty attribute values in nvpairs (bnc#898625)
+- high: ui_maintenance: Add maintenance sublevel (bnc#899234)
+- medium: rsctest: Add basic support for systemd services
+- medium: ui_maintenance: Combine action and actionssh into a single command
+- low: rsctest: Better error message for unsupported action
+- low: cibconfig: Improve wording of commit prompt
+- high: cibconfig: Delay reinitialization after commit
+- doc: Add website template for the nongnu page
+- medium: main: Disable interspersed args
+- low: cibconfig: Fix vim modeline
+- high: report: Find nodes for any log type (boo#900654)
+- high: hb_report: Collect logs from journald (boo#900654)
+- doc: Clarified note for default-timeouts
+- doc: Remove reference to crmsh documentation at clusterlabs.org
+- doc: start-guide: Fix version check
+- medium: xmlutil: Use idmgmt when creating new elements (bnc#901543)
+- doc: cibconfig: Add note on inner ids after rename
+- high: cibconfig: Don't crash if given an invalid pattern (bnc#901714)
+- high: xmlutil: Filter list of referenced resources (bnc#901714)
+- medium: ui_resource: Only act on resources (#64)
+- medium: ui_resource: Flatten, then filter (#64)
+- high: ui_resource: Use correct name for error function (bnc#901453)
+- high: ui_resource: resource trace failed if operation existed (bnc#901453)
+
+* Mon Jun 30 2014 Kristoffer Grönlund <kgronlund@suse.com> and many others
+- Release 2.1
+- Add atom feed to development page
+- Medium: hb_report: dot is not illegal in file names (bnc#884079, debian#715391)
+- Low: history: remove existing report directory on refresh
+- medium: ui_history: Print source if given no argument (bnc#883437)
+- Medium: hb_report: update interface to zypper (bnc#883186)
+- Medium: hb_report: support logs with varied timestamps (bnc#883186)
+- Low: hb_report: getstampproc is global (bnc#883186)
+- Low: hb_report: gdb debug symbols output change (bnc#883186)
+- Low: hb_report: don't restrict debuginfo to cluster stack binaries (zypper) (bnc#883186)
+- high: ui_history: Lazily fetch report data on command (bnc#882959)
+- medium: report: Make setting report period more robust (bnc#882959)
+- medium: ui_resource: Remove empty attrlists when overriding children (bnc#882655)
+- high: cibconfig: Retain empty attribute sets (bnc#882655)
+- Low: report: unpack tarball if it's newer than the existing directory
+- Low: report: get node list based on collected logs, not from cib
+- Low: report: test for ha-log.txt instead of cib.txt when listing nodes
+- Low: report: don't warn on extra nodes in the report
+- medium: ui_configure: Nicer error when pacemaker is not running (bnc#882475)
+- medium: scripts: configure SSH in cluster init (bnc#882476)
+- medium: ui_assist: add template command (bnc#882477)
+- medium: cliformat: Fix CLI formatting for rules and id-refs
+- doc: Update documentation for location constraints (bnc#873781)
+- doc: Document interval suffixes (bnc#873677)
+- medium: ui_node: Fix display of node attributes
+- medium: parse: Allow remote as node type
+- low: cliformat: Don't show extraneous id for acl rules
+- high: cibconfig: Fix bug when copying nvpairs (bnc#881369)
+- high: parse: Try to retain ordering if possible (bnc#880371)
+- high: cibconfig: Enable use of v2 patches in Pacemaker (bnc#880371)
+- medium: pacemaker: Don't hardcode list of supported schemas
+- Medium: resource: modify some command wait options (bnc#880982)
+- high: parse: Support for ACL schema 2.0 (bnc#880371)
+- medium: schema: Fix typo in test_schema()
+- medium: parse: Allow empty property sets (bnc#880632)
+- medium: ui_resource: Also trace promote/demote for multistate resources
+- medium: ui_resource: allow trace of resource without specific operation
+- medium: ui_resource: Make op an optional argument to trace/untrace
+- low: ui_resource: Allow untrace without explicit interval
+- high: cibconfig: adjust attributes when adding operations (bnc#880052)
+- high: parse: Support id-ref in nvpairs (fate#316118)
+- low: ui_configure: Add --force flag to configure delete
+- medium: xmlutil: Limit xpath search to children (bnc#879419)
+- medium: ui: Fix argument check in resource commands (gh#crmsh/crmsh#29)
+- high: xmlutil: Include remote nodes in nodelist (bnc#878112)
+- medium: cibconfig: Detect broken child relationship (bnc#878112)
+- high: cibconfig: Ban containers stealing children (bnc#878112)
+- low: command: Add -h and --help as aliases to help
+- high: parse: Allow role in rule-based location constraints (bnc#878128)
+- medium: report: Return to handling timestamps internally (bnc#877495)
+- medium: ui_resource: Fix race in start/stop/manage/unmanage (bnc#877640)
+- medium: parse: Allow empty attribute lists
+- medium: cibconfig: Fix uses of add_operation
+- medium: report: Make regexp groups non-capturing to avoid limit (bnc#877484)
+- medium: doc: Document rules in attribute lists (bnc#865292)
+- medium: constants: Rename cluster attribute to cluster-name (fate#316118)
+- medium: idmgmt: Fix id assignment and update regression tests (bnc#865292)
+- medium: cibconfig: Enable score for instance_attributes (bnc#865292)
+- high: cibconfig: Support rules in attribute lists (bnc#865292)
+- low: cibconfig: Better error when referring to non-existent template
+- medium: scripts: Handle percent characters in script output (bnc#876882)
+- pacemaker: Support 2.0 schema
+- vars: Rename property: s/site/cluster (fate#316118)
+- Medium: hb_report: fix ssh passwords again (bnc#867365)
+- vars: Add site to list of extra cluster properties (fate#316118)
+- parse: Fix check for action/role in resource set parser (#14)
+- report: More problems with datetime (bnc#874162)
+- report: Resolve datetime/timestamp mixup (bnc#874162)
+- utils: Handle datetime objects in shorttime/shortdate (bnc#874162)
+- main: Fix reference before assignment (#7)
+- crm: Check and complain about python version < 2.6 (#11)
+- parse: Unify API for err(), fix error
+- Fix garbage characters in prompt (issue#7)
+- Medium: cibconf: add comments in the right order (bnc#866434)
+- site: pass --force flag through to crm_ticket (bnc#873200)
+- Low: report: Use subsecond precision if possible (bnc#872932)
+- Low: hb_report: pcmk lib changed permissions (bnc#872958)
+- Low: history: set colours for all nodes found (bnc#872936)
+- ui_resource: Allow setting meta attributes on tags (fate#315101)
+- ui_configure: tag command (fate#315101)
+- parse: Support cib object tags (fate#315101)
+- cibconfig: Support filename-style globs in show/edit (bnc#864346)
+- ui_resource: Only search in top-level (bnc#865024)
+- ui_resource: Don't create extra nvpairs (bnc#865024)
+- utils: Don't crash on missing reply to y/n question
+- Allow building crmsh without PyYAML
+- Support for pacemaker-1.3 RNG schema
+
+* Thu Apr 4 2014 Kristoffer Grönlund <kgronlund@suse.com> and many others
+- release 2.0
+- Improve output from history explorer when using a crm_report-generated
+ report (bnc#870886)
+- Add journal.log to interesting log files (bnc#870886)
+- make sanity check of node name case-insensitive
+- hb_report: Don't use deprecated ifconfig (bnc#871089)
+- parse: Clean up the CLI syntax display
+- ra: display without class:provider: prefix if possible
+- Better args error handling in configure load/save (bnc#870654)
+- ui_context: Correctly check end_game() return value (bnc#868533)
+- command: Propagate error from auto-commit (bnc#868533)
+- crm_pkg: Add --no-refresh to zypper commands
+- scripts: configure firewall to open cluster ports (bnc#868008)
+- scripts: Improved debug output from cluster scripts (bnc#866636)
+- main: Better descriptions for -d and -R flags.
+- utils: Nicer warning when crm_simulate fails
+- ui: Don't call nonexistent function on unsupported cluster stack
+- xmlutil: fencing-topology used broken comparison (bnc#866639)
+- parse: More liberal parsing of role assignment in constraint rules
+- scripts: corosync uses mcastport - 1 (bnc#868008)
+- utils: ask() did not respect force flag in all cases (bnc#868007)
+- xmlutil: Compare attribute dictionaries properly
+- xmlutil: Fix attribute handling in XML comparison function
+- xmlutil: Fix sorting of attribute keys in xml_cmp
+- xmlutil: Sanitize the CIB a bit less aggressively (bnc#866434)
+- xmlutil: in xml_cmp, s/print/common_debug/
+- xmlutil: Handle XML comments properly in xml_cmp
+- xmlutil: order-independent XML comparison (bnc#866434)
+- scripts: don't modify system unless necessary (bnc#866569)
+- xmlutil: don't crash on degenerate colocations
+- scripts: enable trace logging for cluster scripts (bnc#866636)
+- ui_cluster: use crm_mon -bD1 in wait_for_cluster (bnc#866635)
+- scripts: Disable corosync.log by default (bnc#866569)
+- scripts: Open appropriate ports in firewall (bnc#866569)
+- scripts: Configure quorum based on node count (bnc#866569)
+- utils: Record all calls in regression test output (bnc#862383)
+- ui_resource: Add maintenance command (bnc#863071)
+- parse: Fix resource sets, once and for all (savannah#41617)
+- scripts: Disable strict host key checking (bnc#864238)
+- hb_report: Fix incorrect quotes (bnc#863816)
+- cibconfig: do not format xml tags when requested (bnc#863746)
+- cibconfig: Handle non-string arguments (bnc#863736)
+- ui_root: Rename root level to 'root' (bnc#863583)
+- corosync: Allow tabs in corosync.conf (bnc#862577)
+- parse: Fix sequential=true for resource sets (bnc#862334)
+- cibconfig: fencing_topology warning with stonith templates
+ (savannah#41414)
+- xmlutil: rsc_template has no provider attribute (savannah#41410)
+- ra: Infer provider from RA name (bnc#860754)
+- ui_options: add missing documentation for options set (bnc#860585)
+- ui_cib: correct name of cib import (bnc#860584)
+- ui_ra: Fix problems with ra info command (bnc#860583)
+- ui_resource: Fix crash in resource cleanup (bnc#859570)
+- ui_assist: Add assist sublevel (fate#314917)
+- hb_report: Show progress when processing many transitions
+- report: Open reports output by crm_report (fate#316330)
+- hb_report: Display as 'report'
+- report: Move report creation to root
+- ui_report: Fix invocation of hb_report
+- hb_report: call corosync-blackbox, not corosync-fplay
+- help: Bug in delayed loading of help text
+- corosync: Better parser and more commands
+- scripts: Set PasswordAuthentication=no
+- ui_resource: Fix bug in resource restart
+- ui_cluster: Revised cluster status
+- msg: Don't print ok/info to stderr
+- ui_script: Allow --nodes='..', not only --nodes '..'
+- scripts: Cluster scripts (fate#316464, fate#309206, fate#316332)
+- config: Validate boolean values correctly
+- main: Seed random generator on startup
+- main: More informative error on start failure
+- cluster: Use crm_node -l for node list
+- crm_pssh: Limit scope of glob in pssh/get_output
+- ui_context: Less repetitive error message on unknown command
+- ui_cib: Fix typo in sublevel name: cib.cibconfig -> cib.cibstatus
+- help: Return error if help topic is not found (bug#40821)
+- main: Return more useful error codes
+- crm_gv: Support rsc_template in graphs (bnc#850159)
+- cibconfig: Updated fix for configure load method (bnc#841764)
+- parse: Correct recognition of kind in order constraints
+- history: Fix incorrect argument to level check
+- report: Fix broken call to hb_report
+- parse: Stricter parsing of resource names
+- parse: Resource sets in location constraints (fate#315158).
+- utils: Enable cibadmin -P for 1.1.11
+- parse: rsc_template is not recognized by parser (bnc#854562)
+- vars: Add remote-node as resource attribute (bnc#854552)
+- cibconfig: Add missing config import
+- hb_report: Prefer generating .bz2 archives (bnc#854060)
+- hb_report: Add support for xz compression (bnc#854060)
+- cluster: Implement run using pssh
+- ui_cluster: Cluster sublevel implementation
+- configure: Improved completion for group, clone, ms (bnc#845339)
+- config: Set OCF_ROOT in environ structure (used by ra.py)
+- main: Tab completion for multi-line statements (bnc#845339)
+- bash_completion: Add completion installation to spec file
+- ui_resource: Added new resource scores command
+- command: Improved default help for commands
+- crm_gv: Limit graph size to fit on A4
+- config: New configuration file format
+- parse: Support role= in rsc_location
+- msg: Add colors to message output
+- templates: Update OCFS2 template.
+- ui_context: Fix readline completion for empty input
+- ui_configure: Clearer error messages
+- ui_context: Wait if in transit
+- ui_configure: Clearer error messages
+- Enable colorized prompt
+- ui_context: Allow ui stack modifications
+- ui_configure: Completion + help for primitive
+- ui_context: Fix completion with no args to command
+- command: Fix case with no args to completer
+- ui_context: Improve completion
+- ui_ra: Updated completion for info
+- main.compgen: Adapt output to bash completion
+- bash_completion: Improve colon-handling
+- main: Fix issues with ctrl+C and profiling
+- ui_options: add option to print single user preference values
+- bash_completion: fix path to crm
+- Clean up contextual_help
+- Fix help with no argument
+- ui_context: Allow commands that manipulate the stack
+- ui_context: Fix stack handling
+- ui_configure: Add missing return statement
+- Check if command failed
+- Initial bash completion / completion framework
+- Add report level to wrap crmsh_hb_report
+- UI makeover
+- help: Rewritten help subsystem
+- hb_report: exit early if which(1) is missing
+- ui: anonymous temporary shadow CIBs
+- cibconf: fix two fencing top issues (savannah#40173)
+- node: clear state new way since pcmk 1.1.8 (bnc#843699)
+- Integrate hb_report as part of crmsh
+
+* Tue Sep 24 2013 Kristoffer Grönlund <kgronlund@suse.com>, Dejan Muhamedagic <dejan@suse.de>, and many others
+- release 1.2.6
+- cibconf: fix removing cluster properties in edit (bnc#841764)
+- history: improve setting history source
+- cibconf: fix rsc_template referencing (savannah#40011)
+- rsctest: add support for STONITH resources
+- help: fix help for alias commands
+- history: show and allow completion of all primitives and not only
+ top level resources such as groups
+- site: add missing completions
+- rsctest: fix multistate resource testing
+- site: add missing command aliases
+
+* Wed Aug 28 2013 Dejan Muhamedagic <dejan@suse.de> and many others
+- release candidate 1.2.6-rc3
+- cibconf: disable atomic updates until cibadmin gets fixed
+- cibconf: match special ids on configuration edit (fixes
+ disappearing elements on edit)
+- doc: website sources
+
+* Mon Aug 5 2013 Dejan Muhamedagic <dejan@suse.de> and many others
+- release candidate 1.2.6-rc1
+- main: allow starting with a specified CIB shadow
+- main: make sure that tmp files get removed
+- cibconf: replace minidom with lxml
+- cibconf: groups can have the container meta attribute
+- cibconf: do not load CIB automatically in a non-interactive
+ mode (bnc#813045)
+- cibconf: allow single level fencing_topology (savannah#38737)
+- cibconf: improve exit code if a referenced element does not
+ exist (e.g. in the show command)
+- cibconf: add simulate alias for the ptest command
+- cibconf: add -S when running crm_simulate (formerly ptest)
+- cibconf: use cibadmin patch to update live CIB (with pcmk >= 1.1.10)
+- cibconf: node ids are not id but text
+- cibconf: improve elements edit operation
+- resource: trace and untrace (RA) commands
+- resource: prevent whitespace in meta_attributes when setting
+ attributes in nested elements such as groups (bnc#815447)
+- resource: add option for better control of group management
+ (bnc#806901)
+- node/resource: improve lifetime processing
+- node: update interface to crm_node, its usage changed
+ (bnc#805278)
+- node: maintenance/ready commands
+- node: ignore case when looking up nodes
+- node: update interface to crm_node (node delete)
+- node: allow forced node removal
+- shadow: fix regression in cib import (from PE file)
+- shadow: set shadow directory according to the user preference
+- history: fix search for resource messages (bnc#803790)
+- history: refresh live report for commands other than info
+ (bnc#807402)
+- history: use anonymous re groups to prevent out of groups assertion
+- history: fix xpath expression for graphs of resource sets
+- history: skip empty lines (!) when searching logs
+- history: add support for rfc5424 date format in syslog
+- userprefs: add reset command
+- ui: fix exit code of crm status if crm_mon fails (savannah#38702)
+- ui: fix exit code of the help command
+- parse: drop obsolete test for operations
+- performance: do not make unnecessary parameter uniqueness test
+ (bnc#806372)
+- performance: check programs existence with python os module
+ (bnc#806372)
+- performance: improve tests for running resources
+
+* Wed Feb 6 2013 Dejan Muhamedagic <dejan@suse.de> and many others
+- stable release 1.2.5
+- cibconfig: modgroup command
+- cibconfig: directed graph support
+- cibconfig: fix syntax error in ptest
+- history: diff command (between PE inputs)
+- history: show pe commands
+- history: graph command
+- history: reduce number of live updates
+- history: inherit year from the report
+
+* Mon Dec 17 2012 Dejan Muhamedagic <dejan@suse.de> and many others
+- stable release 1.2.4
+- shadow: return proper exit code on I/O errors
+- history: implement transition save (to shadow) subcommand
+- history: fix regression when creating log objects
+- history: detailed transition output
+- history: force refresh on session load
+
+* Tue Dec 11 2012 Dejan Muhamedagic <dejan@suse.de> and many others
+- stable release 1.2.3
+- ra: don't print duplicate RAs in the list command (bnc#793585)
+- history: optimize source refreshing
+
+* Thu Dec 6 2012 Dejan Muhamedagic <dejan@suse.de> and many others
+- stable release 1.2.2
+- cibconfig: don't bail out if filter fails
+- cibconfig: improve id management on element update
+- ra: add support for nagios plugins
+- utils: make sure that there's at least one column (savannah#37658)
+- ui: improve quotes insertion (possible regression)
+- history: adjust log patterns for pacemaker v1.1.8
+- history: fix setting up the timeframe alias for limit
+- history: fix unpacking reports specified without directory
+- history: add log subcommand to transition
+- build: pcmk.pc renamed to pacemaker.pc in pacemaker v1.1.8
+
+* Mon Oct 15 2012 Dejan Muhamedagic <dejan@suse.de> and many others
+- stable release 1.2.1
+- cibconfig: show error message on id in use
+- cibconfig: repair edit for non-vi users
+- cibconfig: update schema separately (don't remove the status section)
+- cibconfig: node type is optional now
+- ui: readd quotes for single-shot commands
+- ra: manage without glue installed (savannah#37560)
+- ra: improve support for RH fencing-agents
+- ra: add support for crm_resource
+- history: remove keyword 'as' which is not compatible with python
+ 2.4 (savannah#37534)
+- history: add the exclude (log messages) command
+- history: pacemaker 1.1.8 compatibility code
+- utils: exit code of cibadmin -Q on no section changed in 1.1.8
+- some more pacemaker 1.1.8 compatibility code
+
+* Tue Sep 18 2012 Dejan Muhamedagic <dejan@suse.de> and many others
+- stable release 1.2.0
+- cibconfig: support the kind attribute in orders
+- cibconfig: implement node-attribute in collocations
+- cibconfig: support require-all in resource sets
+- cibconfig: support for fencing-topology
+- cibconfig: new schema command
+- rsctest: resource testing
+- history: implement session saving
+- history: add alias (timeframe) for the limit command
+- xml: support for RNG schema
+- site: ticket standby and activate commands
+- site: update interface to crm_ticket
+- cibstatus: ticket management
+- ui: add vim syntax highlighting support
+- xml: retrieve data from schema (lf#2092)
+- stonith: support rhcs fence-agents (bnc#769724)
+- ticket: fix redirecting rsc references in tickets (bnc#763465)
+- ui: import readline only when needed (don't print ".[?1034h")
+- ui: don't accept non-ascii input (lf#2597)
+- ui: enable wait (option -w) for single-shot configure commands
+- shadow: calculate shadow directory just like crm_shadow (bnc#759056)
+- utils: improve terminal output height calculation (pager)
+- utils: use crm_simulate if ptest is not available
+- utils: repair ptest usage (bnc#736212)
+- utils: prevent spurious error messages if an element doesn't
+ exist in CIB (bnc#723677)
+- cibconfig: drop attributes set to default on cib import
+- cibconfig: support setting attributes in resource sets
+- cibconfig: display referenced attr set ids (lf#2304)
+- cibconfig: don't verify parameters starting with '$'
+- cibconfig: fix meta attributes verify for container elements (lf#2555)
+- cibconfig: test for duplicate monitor intervals (lf#2586)
+- cibconfig: don't skip monitor operations on verify
+- cibconfig: use uname instead of id when listing nodes (cl#5043)
+- cibconfig: repair resource parameter uniqueness test
+- cibconfig: repair ability to manage multiple rsc/op_defaults (bnc#737812)
+- cibconfig: remove also elements which depend on the resource
+ template which is to be deleted (bnc#730404)
+- cibconfig: report error if a referenced template in primitive
+ doesn't exist (bnc#730404)
+- cibconfig: exchange rsc and with-rsc after converting collocation
+ sets to standard constraints (bnc#729628)
+- cibconfig: convert resource sets to standard constraints on
+ resource removal (bnc#729628)
+- ra: don't require certain parameters for rhcs stonith resources
+- ra: use only effective UID when choosing RA interface
+- ra: always use lrmadmin with glue 1.0.10 (cl#5036)
+- ra: fix start/stop interval test
+- completion: add command aliases to completion tables (cl#5013)
+- completion: add templates as possible resource references in
+ constraints
+- history: improve limiting the report time period
+- history: tune resource match patterns
+- history: reset time period when setting source
+- history: add clone/ms resources to events (fixes the transition command)
+- history: expand clones and ms in the resource command (bnc#729631)
+- history: don't assume that a hb_report tarball name matches the
+ top directory name
+- history: handle non-existing source better (bnc#728346)
+- history: fix regression when fetching new PE inputs (bnc#723417)
+- history: use debug severity for repeating messages (bnc#726611)
+- help: page overview help screens
+- help: append slash to levels in overview help screen
+- help: add '?' as alias for help
+- help: add topics to the help system
+- doc: describe deficiency in the configure edit command (bnc#715698)
+- move user files to standard locations (XDG)
+- build: add optional regression testing on rpm build
+- build: fetch the daemon location from glue-config.h
+
+* Wed Oct 19 2011 Dejan Muhamedagic <dejan@suse.de> and many others
+- stable release 1.1.0
+- history/troubleshooting support
+- template support
+- geo-cluster support commands
+- support for configure rsc_ticket
+- support for LRM secrets at the resource level
+- enable removal of unmanaged resources (bnc#696506)
+- split-off from Pacemaker after release 1.1.6
diff --git a/Makefile.am b/Makefile.am
new file mode 100644
index 0000000..0af5a6c
--- /dev/null
+++ b/Makefile.am
@@ -0,0 +1,81 @@
+#
+# crmsh
+#
+# Copyright (C) 2015 Kristoffer Gronlund
+# Copyright (C) 2008 Andrew Beekhof
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+MAINTAINERCLEANFILES = Makefile.in aclocal.m4 configure
+
+# in .spec, set --sysconfdir=/etc
+
+# Documentation
+doc_DATA = AUTHORS COPYING README.md ChangeLog $(generated_docs)
+crmconfdir=$(sysconfdir)/crm
+crmconf_DATA = etc/crm.conf etc/profiles.yml
+contribdir = $(docdir)/contrib
+contrib_DATA = contrib/pcmk.vim contrib/README.vimsyntax
+helpdir = $(datadir)/$(PACKAGE)
+asciiman = doc/crm.8.adoc doc/crmsh_crm_report.8.adoc doc/profiles.adoc
+help_DATA = doc/crm.8.adoc
+
+generated_docs =
+generated_mans =
+if BUILD_ASCIIDOC
+generated_docs += $(ascii:%.adoc=%.html) $(asciiman:%.adoc=%.html)
+generated_mans += $(asciiman:%.8.adoc=%.8)
+$(generated_mans): $(asciiman)
+man8_MANS = $(generated_mans)
+endif
+
+%.html: %.adoc
+ $(ASCIIDOC) --unsafe --backend=xhtml11 $<
+
+%.8: %.8.adoc
+ a2x -f manpage $<
+
+# Shared data files
+install-data-hook:
+ mkdir -p $(DESTDIR)$(datadir)/@PACKAGE@/; \
+ for d in $$(cat data-manifest); do \
+ install -D -m $$(test -x $$d && echo 0755 || echo 0644) $$d $(DESTDIR)$(datadir)/@PACKAGE@/$$d; done; \
+ mv $(DESTDIR)$(datadir)/@PACKAGE@/test $(DESTDIR)$(datadir)/@PACKAGE@/tests; \
+ cp test/testcases/xmlonly.sh $(DESTDIR)$(datadir)/@PACKAGE@/tests/testcases/configbasic-xml.filter
+
+# Python module installation
+all-local:
+ (cd $(srcdir); $(PYTHON) setup.py build \
+ --build-base $(shell readlink -f $(builddir))/build \
+ --verbose)
+
+python_prefix = --prefix=$(prefix)
+
+install-exec-local:
+ $(INSTALL) -d -m 770 $(DESTDIR)/${localstatedir}/log/crmsh
+ -mkdir -p $(DESTDIR)$(pkgpythondir)
+ $(PYTHON) $(srcdir)/setup.py install \
+ --root $(DESTDIR)/// \
+ $(python_prefix) \
+ --record $(DESTDIR)$(pkgpythondir)/install_files.txt \
+ --verbose
+ $(INSTALL) -d -m 770 $(DESTDIR)$(CRM_CACHE_DIR)
+
+uninstall-local:
+ cat $(DESTDIR)$(pkgpythondir)/install_files.txt | xargs rm -rf
+ rm -rf $(DESTDIR)$(pkgpythondir)
+
+dist-clean-local:
+ rm -f autoconf automake autoheader
diff --git a/NEWS b/NEWS
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/NEWS
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..c8c3892
--- /dev/null
+++ b/README.md
@@ -0,0 +1,115 @@
+# crmsh
+
+[![Build Status](https://github.com/ClusterLabs/crmsh/actions/workflows/crmsh-ci.yml/badge.svg)](https://github.com/ClusterLabs/crmsh/actions/workflows/crmsh-ci.yml)
+
+
+## Introduction
+
+crmsh is a command-line interface for High-Availability cluster
+management on GNU/Linux systems, and part of the ClusterLabs
+project. It simplifies the configuration, management and
+troubleshooting of Pacemaker-based clusters by providing a powerful
+and intuitive set of features.
+
+crmsh can function both as an interactive shell with tab completion
+and inline documentation, and as a command-line tool. It can also be
+used in batch mode to execute commands from files.
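+
+For example (a brief sketch: `crm status` and `crm configure show` are
+commands mentioned elsewhere in this repository, while the `-f` flag
+for reading commands from a file is an assumption here; check
+`crm --help` on your system):
+
+```shell
+crm                   # start the interactive shell
+crm status            # run a single command and exit
+crm -f commands.txt   # batch mode: execute commands from a file
+```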
+
+## Documentation
+
+* The website for crmsh is here: [crmsh @ Github.io](http://crmsh.github.io).
+* Documentation for the latest stable release is found at the [Github.io documentation](http://crmsh.github.io) page.
+
+## Installation
+
+crmsh is implemented in Python and requires Python version 3.4 or
+newer. Versions of crmsh older than the 4 series ran on Python 2, so
+if you don't have access to a Python 3 interpreter, you will need to
+use one of the older releases.
+
+The GNU Autotools suite is used to configure the OCF root directory,
+the Asciidoc tool used to generate the documentation, and the default
+daemon user (usually hacluster).
+
+The build then calls the Python setuptools setup.py script to process
+the Python module sources and install them into the system
+site-packages directory.
+
+```shell
+./autogen.sh
+./configure
+make
+make install
+```
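+
+The default autotools prefix is `/usr/local`. For a packaged layout
+(a hedged sketch: `--sysconfdir=/etc` is the value the spec file sets,
+while the `/usr` prefix is an assumption for distribution-style
+installs):
+
+```shell
+./autogen.sh
+./configure --prefix=/usr --sysconfdir=/etc
+make
+sudo make install
+```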
+
+## Test suites
+
+There are two sets of tests: unit tests and functional (regression) tests.
+
+#### Unit tests
+To run them locally:
+- `pip install tox`
+- In the root directory of the crmsh project, run `tox`
+
+#### Functional tests
+To run them locally:
+- In the root directory of the crmsh project, run `./test/run-functional-tests [OPTIONS]|[TESTCASE INDEX]`
+
+```
+# ./test/run-functional-tests -h
+Usage: run-functional-tests [OPTIONS]|[TESTCASE INDEX]
+run-functional-tests is a tool for developers to setup the cluster in containers to run functional tests.
+The container image is based on Tumbleweed with preinstalled packages of the cluster stack include pacemaker/corosync/crmsh and many others.
+Users can make the code change under crmsh.git including test cases. This tool will pick up the code change and "make install" to all running containers.
+
+OPTIONS:
+ -h, --help Show this help message and exit
+ -l List existing functional test cases and exit
+ -n NUM Only setup a cluster with NUM nodes(containers)
+ -x Don't config corosync on containers(with -n option)
+ -d Cleanup the cluster containers
+```
+
+The Docker base image used is defined in the `Dockerfile` included in the repository.
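+
+For example, using the options from the help output above:
+
+```shell
+./test/run-functional-tests -l    # list the available test cases
+./test/run-functional-tests -n 2  # only set up a 2-node cluster in containers
+./test/run-functional-tests -d    # clean up the cluster containers
+```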
+
+The same test suites also run [in GitHub Actions](https://github.com/ClusterLabs/crmsh/actions/workflows/crmsh-ci.yml).
+
+## Manifest
+
+```shell
+./doc: man page, source for the website and other documentation
+./crmsh: the code
+./templates: configuration templates
+./test: unit tests and regression tests
+./contrib: vim highlighting scripts and other semi-related
+ contributions
+./crm report: log file collection and analysis tool
+```
+
+## Development
+
+The source code for crmsh is kept in a git source repository. To check
+out the latest development version, install git and run this command:
+
+```shell
+git clone https://github.com/ClusterLabs/crmsh
+```
+
+There is a git `pre-commit` hook used to update the data-manifest
+which lists all the data files to be installed. To install this, run
+
+```shell
+cp contrib/git-hook-pre-commit .git/hooks/pre-commit
+chmod +x .git/hooks/pre-commit
+```
+
+## Contributing
+
+You can contribute by following the standard `fork -> clone -> change -> pull request` GitHub process for code changes. The pull request process is integrated with the [openSUSE Build Service](https://build.opensuse.org/): as soon as a pull request is merged, a new RPM package is built on [network:ha-clustering:Unstable](https://build.opensuse.org/project/show/network:ha-clustering:Unstable) and a `submit request` is created for the _crmsh_ package maintainers at [network:ha-clustering:Factory](https://build.opensuse.org/project/show/network:ha-clustering:Factory).
+
+Commit messages are used to generate the changelog, so please write relevant and comprehensive commit messages.
+
+## Community
+
+* Bugs and issues can be reported at the [crmsh issues @ Github.com](https://github.com/clusterlabs/crmsh/issues) page.
+* Any other questions or comments can be made on the [Clusterlabs users mailing list](http://clusterlabs.org/mailman/listinfo/users).
diff --git a/TODO b/TODO
new file mode 100644
index 0000000..91600f6
--- /dev/null
+++ b/TODO
@@ -0,0 +1,34 @@
+Features
+
+. Audit
+
+ - add user auditing, i.e. save all commands that were run
+ (DONE: see the -R flag)
+
+ - save to a local file (a distributed DB would probably be
+ overkill)
+
+. Cluster documentation
+
+ - one of the more recent features is graph capability
+ (graphviz) which is a very good step in terms of cluster
+ documentation; need to extend that with some textual
+ cluster description and perhaps history and such
+
+ - everybody likes reports (and in particular your boss)
+
+ - this feature needs very careful consideration
+
+. CIB features
+
+ - Support ACL commands in Pacemaker 1.1.12 and newer
+ (DONE)
+
+. Command features
+
+ - Relative commands: /status from configure, ../resource stop foo
+ from configure, cib/new from configure... for example.
+
+ Tricky part: Have to push/pop levels invisibly, resource
+ commands modify CIB while CIB is edited in configure. Similar
+ races could occur with other commands. \ No newline at end of file
diff --git a/autogen.sh b/autogen.sh
new file mode 100755
index 0000000..f92ecc3
--- /dev/null
+++ b/autogen.sh
@@ -0,0 +1,144 @@
+#!/bin/sh
+#
+# License: GNU General Public License (GPL)
+# Copyright 2001 horms <horms@vergenet.net>
+# (heavily mangled by alanr)
+#
+# bootstrap: set up the project and get it ready to make
+#
+# Basically, we run autoconf and automake in the
+# right way to get things set up for this environment.
+#
+# We also look and see if those tools are installed, and
+# tell you where to get them if they're not.
+#
+# Our goal is to not require dragging along anything
+# more than we need. If this doesn't work on your system
+# (i.e., your /bin/sh is broken), send us a patch.
+#
+# This code is loosely based on the correspondingly named script in
+# enlightenment, and also on the sort-of-standard autoconf
+# bootstrap script.
+
+# Run this to generate all the initial makefiles, etc.
+
+testProgram()
+{
+ cmd=$1
+
+ if [ -z "$cmd" ]; then
+ return 1;
+ fi
+
+ arch=`uname -s`
+
+ # Make sure the which is in an if-block... on some platforms it throws exceptions
+ #
+ # The ERR trap is not executed if the failed command is part
+ # of an until or while loop, part of an if statement, part of a &&
+ # or || list.
+ if
+ which $cmd </dev/null >/dev/null 2>&1
+ then
+ :
+ else
+ return 1
+ fi
+
+ # The GNU standard is --version
+ if
+ $cmd --version </dev/null >/dev/null 2>&1
+ then
+ return 0
+ fi
+
+ # Maybe it supports -V instead
+ if
+ $cmd -V </dev/null >/dev/null 2>&1
+ then
+ return 0
+ fi
+
+ # Nope, the program seems broken
+ return 1
+}
+
+gnu="ftp://ftp.gnu.org/pub/gnu"
+
+for command in autoconf213 autoconf253 autoconf259 autoconf
+do
+ if
+ testProgram $command
+ then
+ autoconf=$command
+ autoheader=`echo "$autoconf" | sed -e 's/autoconf/autoheader/'`
+ autom4te=`echo "$autoconf" | sed -e 's/autoconf/autom4te/'`
+ autoreconf=`echo "$autoconf" | sed -e 's/autoconf/autoreconf/'`
+ autoscan=`echo "$autoconf" | sed -e 's/autoconf/autoscan/'`
+ autoupdate=`echo "$autoconf" | sed -e 's/autoconf/autoupdate/'`
+ ifnames=`echo "$autoconf" | sed -e 's/autoconf/ifnames/'`
+ fi
+done
+
+for command in automake14 automake-1.4 automake15 automake-1.5 automake17 automake-1.7 automake19 automake-1.9 automake-1.11 automake
+do
+ if
+ testProgram $command
+ then
+ : OK $command is installed
+ automake=$command
+ aclocal=`echo "$automake" | sed -e 's/automake/aclocal/'`
+ fi
+done
+
+if [ -z "$autoconf" ]; then
+ echo You must have autoconf installed to compile the crmsh package.
+ echo Download the appropriate package for your system,
+ echo or get the source tarball at: $gnu/autoconf/
+ exit 1
+
+elif [ -z "$automake" ]; then
+ echo You must have automake installed to compile the crmsh package.
+ echo Download the appropriate package for your system,
+ echo or get the source tarball at: $gnu/automake/
+ exit 1
+
+fi
+
+# Create local copies so that the incremental updates will work.
+rm -f ./autoconf ./automake ./autoheader
+ln -s `which $autoconf` ./autoconf
+ln -s `which $automake` ./automake
+ln -s `which $autoheader` ./autoheader
+
+printf "$autoconf:\t"
+$autoconf --version | head -n 1
+
+printf "$automake:\t"
+$automake --version | head -n 1
+
+arch=`uname -s`
+# Disable the errors on FreeBSD until a fix can be found.
+if [ ! "$arch" = "FreeBSD" ]; then
+set -e
+#
+# All errors are fatal from here on out...
+# The shell will complain and exit on any "uncaught" error code.
+#
+#
+# And the trap will ensure some kind of error message comes out.
+#
+trap 'echo ""; echo "$0 exiting due to error (sorry!)." >&2' 0
+fi
+
+echo $aclocal $ACLOCAL_FLAGS
+$aclocal $ACLOCAL_FLAGS
+
+echo $automake --add-missing --include-deps --copy
+$automake --add-missing --include-deps --copy
+
+echo $autoconf
+$autoconf
+
+echo Now run ./configure
+trap '' 0
diff --git a/bin/crm b/bin/crm
new file mode 100755
index 0000000..6fe74d8
--- /dev/null
+++ b/bin/crm
@@ -0,0 +1,56 @@
+#!/usr/bin/python3
+#
+# crmsh, command line interface for Linux HA clusters
+# Copyright (C) 2008-2015 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013-2015 Kristoffer Gronlund <kgronlund@suse.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+import sys
+
+if sys.version_info[0] < 3:
+    sys.stderr.write("Abort: crmsh only supports python3\n")
+    sys.exit(-1)
+
+try:
+ from crmsh import log
+ if '-h' not in sys.argv and '--help' not in sys.argv:
+ log.setup_logging()
+ else:
+ log.setup_logging(only_help=True)
+
+ from crmsh import main
+except ImportError as msg:
+ sys.stderr.write('''Fatal error:
+ %s
+
+Failed to start crmsh! This is likely due to:
+- A missing dependency (e.g. the corresponding python3 version)
+- A broken installation
+
+If you are using a packaged version of crmsh, please try
+reinstalling the package. Also check your PYTHONPATH and
+make sure that the crmsh module is reachable.
+
+Please file an issue describing your installation at
+https://github.com/Clusterlabs/crmsh/issues/.
+''' % (msg))
+ sys.exit(-1)
+
+rc = main.run()
+sys.exit(rc)
+
+# vim:ts=4:sw=4:et:
diff --git a/codecov.yml b/codecov.yml
new file mode 100644
index 0000000..19960ec
--- /dev/null
+++ b/codecov.yml
@@ -0,0 +1,17 @@
+coverage:
+ status:
+ project:
+ default:
+ threshold: 0.35%
+ patch:
+ default:
+ threshold: 0.35%
+codecov:
+ token: 16b01c29-3b23-4923-b33a-4d26a49d80c4
+ notify:
+ after_n_builds: 23
+comment:
+ after_n_builds: 23
+
+ignore:
+ - "crmsh/report"
diff --git a/configure.ac b/configure.ac
new file mode 100644
index 0000000..c0cb600
--- /dev/null
+++ b/configure.ac
@@ -0,0 +1,62 @@
+dnl
+dnl autoconf for crmsh
+dnl
+dnl Copyright (C) 2015 Kristoffer Gronlund
+dnl Copyright (C) 2008 Andrew Beekhof
+dnl
+dnl License: GNU General Public License (GPL)
+
+AC_PREREQ([2.53])
+
+AC_INIT([crmsh],[4.5.0],[users@clusterlabs.org])
+
+AC_ARG_WITH(version,
+ [ --with-version=version Override package version (if you're a packager needing to pretend) ],
+ [ PACKAGE_VERSION="$withval" ])
+
+AC_ARG_WITH(pkg-name,
+ [ --with-pkg-name=name Override package name (if you're a packager needing to pretend) ],
+ [ PACKAGE_NAME="$withval" ])
+
+OCF_ROOT_DIR="/usr/lib/ocf"
+AC_ARG_WITH(ocf-root,
+ [ --with-ocf-root=DIR directory for OCF scripts [${OCF_ROOT_DIR}]],
+ [ if test x"$withval" = xprefix; then OCF_ROOT_DIR=${prefix}; else
+ OCF_ROOT_DIR="$withval"; fi ])
+
+AC_ARG_WITH(daemon-user,
+ [ --with-daemon-user=USER_NAME
+ User to run privileged non-root things as. [default=hacluster] ],
+ [ CRM_DAEMON_USER="$withval" ],
+ [ CRM_DAEMON_USER="hacluster" ])
+
+AM_INIT_AUTOMAKE([no-define foreign])
+AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE_NAME")
+AC_DEFINE_UNQUOTED(VERSION, "$PACKAGE_VERSION")
+
+dnl automake >= 1.11 offers --enable-silent-rules for suppressing the output from
+dnl normal compilation. When a failure occurs, it will then display the full
+dnl command line
+dnl Wrap in m4_ifdef to avoid breaking on older platforms
+m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES])
+
+AC_SUBST(OCF_ROOT_DIR)
+AC_SUBST(CRM_DAEMON_USER)
+
+CRM_CACHE_DIR=${localstatedir}/cache/crm
+AC_DEFINE_UNQUOTED(CRM_CACHE_DIR,"$CRM_CACHE_DIR", Where crm shell keeps the cache)
+AC_SUBST(CRM_CACHE_DIR)
+
+AM_PATH_PYTHON([3])
+AC_PATH_PROGS(ASCIIDOC, asciidoc)
+
+AM_CONDITIONAL(BUILD_ASCIIDOC, test x"${ASCIIDOC}" != x"")
+
+AC_CONFIG_FILES(Makefile \
+etc/crm.conf \
+version \
+crmsh.spec \
+)
+
+AC_OUTPUT
diff --git a/contrib/README.vimsyntax b/contrib/README.vimsyntax
new file mode 100644
index 0000000..3bf5a7a
--- /dev/null
+++ b/contrib/README.vimsyntax
@@ -0,0 +1,35 @@
+There were two VIM syntax files contributed:
+
+pacemaker-crm.vim
+pcmk.vim
+
+The first one got removed because it didn't work with newer CRM
+shell syntax anymore; most of the text was highlighted as "Error".
+
+Neither matches the colours used in crm configure show, and both
+need improvement. Still, you may want a more colourful view when
+editing the configuration. To get that in "crm configure edit", do
+the following:
+
+ 1. Copy pcmk.vim to ~/.vim/syntax/pcmk.vim.
+
+ 2. Make sure the following is added to your VIM rc file
+ (~/.vimrc or ~/.exrc):
+
+ syntax on
+ set modeline
+ set modelines=5
+
+ 3. Copy pcmk-ftdetect.vim to ~/.vim/ftdetect/ so that
+ files are identified automatically.
+
+
+If you're editing a file directly, just type:
+
+ :setf pcmk
+
+Many thanks to the contributors:
+
+Trevor Hemsley <themsley@voiceflex.com>
+Dan Frincu <df.cluster@gmail.com>
+Lars Ellenberg <lars@linbit.com>
diff --git a/contrib/bash_completion.sh b/contrib/bash_completion.sh
new file mode 100644
index 0000000..f497942
--- /dev/null
+++ b/contrib/bash_completion.sh
@@ -0,0 +1,256 @@
+#-*- mode: shell-script;-*-
+#
+# bash completion support for crmsh.
+#
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# Conceptually based on gitcompletion (http://gitweb.hawaga.org.uk/).
+# Distributed under the GNU General Public License, version 2.0.
+#
+# To use these routines:
+#
+# 1) Copy this file to somewhere (e.g. ~/.crm-completion.sh).
+# 2) Add the following line to your .bashrc/.zshrc:
+# source ~/.crm-completion.sh
+
+shopt -s extglob
+
+# The following function is based on code from:
+#
+# bash_completion - programmable completion functions for bash 3.2+
+#
+# Copyright © 2006-2008, Ian Macdonald <ian@caliban.org>
+# © 2009-2010, Bash Completion Maintainers
+# <bash-completion-devel@lists.alioth.debian.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# The latest version of this software can be obtained here:
+#
+# http://bash-completion.alioth.debian.org/
+#
+# RELEASE: 2.x
+
+# This function can be used to access a tokenized list of words
+# on the command line:
+#
+# __git_reassemble_comp_words_by_ref '=:'
+# if test "${words_[cword_-1]}" = -w
+# then
+# ...
+# fi
+#
+# The argument should be a collection of characters from the list of
+# word completion separators (COMP_WORDBREAKS) to treat as ordinary
+# characters.
+#
+# This is roughly equivalent to going back in time and setting
+# COMP_WORDBREAKS to exclude those characters. The intent is to
+# make option types like --date=<type> and <rev>:<path> easy to
+# recognize by treating each shell word as a single token.
+#
+# It is best not to set COMP_WORDBREAKS directly because the value is
+# shared with other completion scripts. By the time the completion
+# function gets called, COMP_WORDS has already been populated so local
+# changes to COMP_WORDBREAKS have no effect.
+#
+# Output: words_, cword_, cur_.
+
+__crm_reassemble_comp_words_by_ref()
+{
+ local exclude i j first
+ # Which word separators to exclude?
+ exclude="${1//[^$COMP_WORDBREAKS]}"
+ cword_=$COMP_CWORD
+ if [ -z "$exclude" ]; then
+ words_=("${COMP_WORDS[@]}")
+ return
+ fi
+ # List of word completion separators has shrunk;
+ # re-assemble words to complete.
+ for ((i=0, j=0; i < ${#COMP_WORDS[@]}; i++, j++)); do
+ # Append each nonempty word consisting of just
+ # word separator characters to the current word.
+ first=t
+ while
+ [ $i -gt 0 ] &&
+ [ -n "${COMP_WORDS[$i]}" ] &&
+ # word consists of excluded word separators
+ [ "${COMP_WORDS[$i]//[^$exclude]}" = "${COMP_WORDS[$i]}" ]
+ do
+ # Attach to the previous token,
+ # unless the previous token is the command name.
+ if [ $j -ge 2 ] && [ -n "$first" ]; then
+ ((j--))
+ fi
+ first=
+ words_[$j]=${words_[j]}${COMP_WORDS[i]}
+ if [ $i = $COMP_CWORD ]; then
+ cword_=$j
+ fi
+ if (($i < ${#COMP_WORDS[@]} - 1)); then
+ ((i++))
+ else
+ # Done.
+ return
+ fi
+ done
+ words_[$j]=${words_[j]}${COMP_WORDS[i]}
+ if [ $i = $COMP_CWORD ]; then
+ cword_=$j
+ fi
+ done
+}
+
+if ! type _get_comp_words_by_ref >/dev/null 2>&1; then
+_get_comp_words_by_ref ()
+{
+ local exclude cur_ words_ cword_
+ if [ "$1" = "-n" ]; then
+ exclude=$2
+ shift 2
+ fi
+ __crm_reassemble_comp_words_by_ref "$exclude"
+ cur_=${words_[cword_]}
+ while [ $# -gt 0 ]; do
+ case "$1" in
+ cur)
+ cur=$cur_
+ ;;
+ prev)
+ prev=${words_[$cword_-1]}
+ ;;
+ words)
+ words=("${words_[@]}")
+ ;;
+ cword)
+ cword=$cword_
+ ;;
+ esac
+ shift
+ done
+}
+fi
+
+__crmcompadd ()
+{
+ local i=0
+ for x in $1; do
+ if [[ "$x" == "$3"* ]]; then
+ if [[ "$x" =~ .*(=|:)$ ]];then
+ if [[ "$x" =~ ^id=$ ]];then
+ :
+ else
+ COMPREPLY[i++]="$2$x"
+ fi
+ else
+ COMPREPLY[i++]="$2$x$4"
+ fi
+ fi
+ done
+}
+
+# Generates completion reply, appending a space to possible completion words,
+# if necessary.
+# It accepts 1 to 4 arguments:
+# 1: List of possible completion words.
+# 2: A prefix to be added to each possible completion word (optional).
+# 3: Generate possible completion matches for this word (optional).
+# 4: A suffix to be appended to each possible completion word (optional).
+__crmcomp ()
+{
+ local cur_="${3-$cur}"
+
+ case "$cur_" in
+ --*=)
+ ;;
+ *)
+ local c i=0 IFS=$' \t\n'
+ for c in $1; do
+ c="$c${4-}"
+ if [[ $c == "$cur_"* ]]; then
+ case $c in
+ --*=*|*.) ;;
+ *) c="$c " ;;
+ esac
+ COMPREPLY[i++]="${2-}$c"
+ fi
+ done
+ ;;
+ esac
+}
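+
+# Illustrative example of __crmcomp (values are hypothetical): with cur="sta",
+#   __crmcomp "start stop status"
+# fills COMPREPLY with "start " and "status ".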
+
+# Generates completion reply from newline-separated possible completion words
+# by appending a space to all of them.
+# It accepts 1 to 4 arguments:
+# 1: List of possible completion words, separated by a single newline.
+# 2: A prefix to be added to each possible completion word (optional).
+# 3: Generate possible completion matches for this word (optional).
+# 4: A suffix to be appended to each possible completion word instead of
+# the default space (optional). If specified but empty, nothing is
+# appended.
+__crmcomp_nl ()
+{
+ local IFS=$'\n'
+ __crmcompadd "$1" "${2-}" "${3-$cur}" "${4- }"
+}
+
+__crm_compgen ()
+{
+ local cur_="$cur" cmd="${words[1]}"
+ local pfx=""
+
+ case "$cur_" in
+ *:*)
+ case "$COMP_WORDBREAKS" in
+ *:*) : great ;;
+ *) pfx="${cur_%%:*}:" ;;
+ esac
+ cur_="${cur_##*:}"
+ ;;
+ esac
+
+ __crmcomp_nl "$(2>/dev/null crm --compgen "${COMP_POINT}" "${COMP_LINE}")" "$pfx" "$cur_"
+}
+
+_crm() {
+ local cur words cword prev
+
+ _get_comp_words_by_ref -n =: cur words cword prev
+
+ for ((i=1; $i<=$cword; i++)); do
+ if [[ ${words[i]} != -* ]]; then
+ if [[ ${words[i-1]} != @(-f|--file|-H|--history|-D|--display|-X|-c|--cib) ]]; then
+ arg="${words[i]}"
+ argi=$i
+ break
+ fi
+ fi
+ done
+
+ case $prev in
+ -f|--file|-H|--history|-D|--display|-X|-c|--cib)
+ # use default completion
+ return
+ ;;
+ esac
+
+ if [[ "$cur" == -* ]]; then
+ __crmcomp '-w -h -d -F -R -f --file -H --history -D --display -X -c --cib'
+ return 0
+ fi
+
+ __crm_compgen
+} &&
+complete -o bashdefault -o default -o nospace -F _crm crm || complete -o default -o nospace -F _crm crm
diff --git a/contrib/git-hook-pre-commit b/contrib/git-hook-pre-commit
new file mode 100755
index 0000000..f4bd35a
--- /dev/null
+++ b/contrib/git-hook-pre-commit
@@ -0,0 +1,14 @@
+#!/bin/sh
+#
+# An example hook script to verify what is about to be committed.
+# Called by "git commit" with no arguments. The hook should
+# exit with non-zero status after issuing an appropriate message if
+# it wants to stop the commit.
+#
+# To enable this hook, rename this file to "pre-commit".
+
+root="$(git rev-parse --show-toplevel)"
+[ -d "$root" ] || exit 1
+
+./update-data-manifest.sh
+git add ./data-manifest
diff --git a/contrib/pcmk-ftdetect.vim b/contrib/pcmk-ftdetect.vim
new file mode 100644
index 0000000..e8e5387
--- /dev/null
+++ b/contrib/pcmk-ftdetect.vim
@@ -0,0 +1,2 @@
+" test for match at first character
+au BufNewFile,BufRead * if match(getline(1), 'node ')==0 | set ft=pcmk | endif
diff --git a/contrib/pcmk.vim b/contrib/pcmk.vim
new file mode 100644
index 0000000..19a93b6
--- /dev/null
+++ b/contrib/pcmk.vim
@@ -0,0 +1,114 @@
+" Vim syntax file
+" Author: Trevor Hemsley <themsley@voiceflex.com>
+" Author: Dan Frincu <df.cluster@gmail.com>
+" Language: pcmk
+" Filenames: *.pcmk
+
+" For version 5.x: Clear all syntax items
+" For version 6.x: Quit when a syntax file was already loaded
+if version < 600
+ syntax clear
+elseif exists("b:current_syntax")
+ finish
+endif
+
+set modeline
+
+" setlocal iskeyword+=-
+
+" Errors
+syn match pcmkParErr ")"
+syn match pcmkBrackErr "]"
+syn match pcmkBraceErr "}"
+
+" Enclosing delimiters
+syn region pcmkEncl transparent matchgroup=pcmkParEncl start="(" matchgroup=pcmkParEncl end=")" contains=ALLBUT,pcmkParErr
+syn region pcmkEncl transparent matchgroup=pcmkBrackEncl start="\[" matchgroup=pcmkBrackEncl end="\]" contains=ALLBUT,pcmkBrackErr
+syn region pcmkEncl transparent matchgroup=pcmkBraceEncl start="{" matchgroup=pcmkBraceEncl end="}" contains=ALLBUT,pcmkBraceErr
+
+" Comments
+syn region pcmkComment start="//" end="$" contains=pcmkComment,pcmkTodo
+syn region pcmkComment start="/\*" end="\*/" contains=pcmkComment,pcmkTodo
+syn keyword pcmkTodo contained TODO FIXME XXX
+
+" Strings
+syn region pcmkString start=+"+ skip=+\\\\\|\\"+ end=+"+
+
+" General keywords
+syn keyword pcmkKeyword node primitive property rsc_defaults op_defaults group clone nextgroup=pcmkName skipwhite
+syn keyword pcmkKey2 location nextgroup=pcmkResource skipwhite
+syn keyword pcmkKey3 colocation order nextgroup=pcmkName3 skipwhite
+syn match pcmkResource /\<\f\+\>/ nextgroup=pcmkName2 skipwhite
+syn match pcmkName /\<\f\+\>/
+syn match pcmkName2 /\<\f\+\>/ nextgroup=pcmkPrio skipwhite
+syn match pcmkName3 /\<\f\+\>/ nextgroup=pcmkPrio skipwhite
+syn match pcmkPrio /\<\w\+\>/
+syn match pcmkNumbers /[[:digit:]]\+\:/
+syn match pcmkInf /inf\:/
+
+" Graph attributes
+syn keyword pcmkType attributes params op meta
+syn keyword pcmkTag monitor start stop migrate_from migrate_to notify demote promote Master Slave
+
+" Special chars
+"syn match pcmkKeyChar "="
+syn match pcmkKeyChar ";"
+syn match pcmkKeyChar "->"
+syn match pcmkKeyChar "\$"
+"syn match pcmkKeyChar "\\"
+syn match pcmkKeyChar ":"
+syn match pcmkKeyChar "-"
+syn match pcmkKeyChar "+"
+
+" Identifier
+syn match pcmkIdentifier /\<\w\+\>/
+syn match pcmkKeyword "^ms\s*" nextgroup=pcmkName skipwhite
+
+" Synchronization
+syn sync minlines=50
+syn sync maxlines=500
+
+" Define the default highlighting.
+" For version 5.7 and earlier: only when not done already
+" For version 5.8 and later: only when an item doesn't have highlighting yet
+if version >= 508 || !exists("did_pcmk_syntax_inits")
+ if version < 508
+ let did_pcmk_syntax_inits = 1
+ command -nargs=+ HiLink hi link <args>
+ else
+ command -nargs=+ HiLink hi def link <args>
+ endif
+
+ HiLink pcmkParErr Error
+ HiLink pcmkBraceErr Error
+ HiLink pcmkBrackErr Error
+
+ HiLink pcmkComment Comment
+ HiLink pcmkTodo Todo
+
+ HiLink pcmkParEncl Keyword
+ HiLink pcmkBrackEncl Keyword
+ HiLink pcmkBraceEncl Keyword
+
+ HiLink pcmkKeyword Keyword
+ HiLink pcmkKey2 Keyword
+ HiLink pcmkKey3 Keyword
+ HiLink pcmkType Keyword
+ HiLink pcmkKeyChar Keyword
+
+" hi Normal ctermfg=yellow ctermbg=NONE cterm=NONE
+ HiLink pcmkString String
+ HiLink pcmkIdentifier Identifier
+ HiLink pcmkTag Tag
+ HiLink pcmkName Type
+ HiLink pcmkName2 Tag
+ HiLink pcmkName3 Type
+ HiLink pcmkResource Type
+ HiLink pcmkPrio Number
+ HiLink pcmkNumbers String
+ HiLink pcmkInf String
+
+ delcommand HiLink
+endif
+
+let b:current_syntax = "pcmk"
diff --git a/contrib/pygments_crmsh_lexers/__init__.py b/contrib/pygments_crmsh_lexers/__init__.py
new file mode 100644
index 0000000..938d6e8
--- /dev/null
+++ b/contrib/pygments_crmsh_lexers/__init__.py
@@ -0,0 +1,3 @@
+from __future__ import unicode_literals
+from .ansiclr import ANSIColorsLexer
+from .crmsh import CrmshLexer
diff --git a/contrib/pygments_crmsh_lexers/ansiclr.py b/contrib/pygments_crmsh_lexers/ansiclr.py
new file mode 100644
index 0000000..b30ee85
--- /dev/null
+++ b/contrib/pygments_crmsh_lexers/ansiclr.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.console
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for misc console output.
+
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+from __future__ import unicode_literals
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Generic, Text
+
+__all__ = ['ANSIColorsLexer']
+
+_ESC = "\x1b\\["  # backslash doubled so the regex sees a literal '['
+# this is normally to reset (reset attributes, set primary font)
+# there could be however other reset sequences and in that case
+# sgr0 needs to be updated
+_SGR0 = "%s(?:0;10|10;0)m" % _ESC
+# BLACK RED GREEN YELLOW
+# BLUE MAGENTA CYAN WHITE
+_ANSI_COLORS = (Generic.Emph, Generic.Error, Generic.Inserted, Generic.Keyword,
+ Generic.Keyword, Generic.Prompt, Generic.Traceback, Generic.Output)
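+# e.g. SGR code 31 (red) maps to Generic.Error, 32 (green) to Generic.Inserted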
+
+
+def _ansi2rgb(lexer, match):
+ code = match.group(1)
+ text = match.group(2)
+ yield match.start(), _ANSI_COLORS[int(code)-30], text
+
+
+class ANSIColorsLexer(RegexLexer):
+ """
+ Interpret ANSI colors.
+ """
+ name = 'ANSI Colors'
+ aliases = ['ansiclr']
+ filenames = ["*.typescript"]
+
+ tokens = {
+ 'root': [
+            (r'%s(3[0-7])m(.*?)%s' % (_ESC, _SGR0), _ansi2rgb),
+ (r'[^\x1b]+', Text),
+ # drop the rest of the graphic codes
+ (r'(%s[0-9;]+m)()' % _ESC, bygroups(None, Text)),
+ ]
+ }
diff --git a/contrib/pygments_crmsh_lexers/crmsh.py b/contrib/pygments_crmsh_lexers/crmsh.py
new file mode 100644
index 0000000..d4f1b14
--- /dev/null
+++ b/contrib/pygments_crmsh_lexers/crmsh.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.dsls
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for various domain-specific languages.
+
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+from __future__ import unicode_literals
+
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Comment, Operator, Keyword, Name, String, Number, Punctuation, Whitespace
+
+__all__ = ['CrmshLexer']
+
+
+class CrmshLexer(RegexLexer):
+ """
+ Lexer for `crmsh <http://crmsh.github.io/>`_ configuration files
+ for Pacemaker clusters.
+
+ .. versionadded:: 2.1
+ """
+ name = 'Crmsh'
+ aliases = ['crmsh', 'pcmk']
+ filenames = ['*.crmsh', '*.pcmk']
+ mimetypes = []
+
+ elem = words((
+ 'node', 'primitive', 'group', 'clone', 'ms', 'location',
+ 'colocation', 'order', 'fencing_topology', 'rsc_ticket',
+ 'rsc_template', 'property', 'rsc_defaults',
+ 'op_defaults', 'acl_target', 'acl_group', 'user', 'role',
+ 'tag'), suffix=r'(?![\w#$-])')
+ sub = words((
+ 'params', 'meta', 'operations', 'op', 'rule',
+ 'attributes', 'utilization'), suffix=r'(?![\w#$-])')
+ acl = words(('read', 'write', 'deny'), suffix=r'(?![\w#$-])')
+ bin_rel = words(('and', 'or'), suffix=r'(?![\w#$-])')
+ un_ops = words(('defined', 'not_defined'), suffix=r'(?![\w#$-])')
+ date_exp = words(('in_range', 'date', 'spec', 'in'), suffix=r'(?![\w#$-])')
+ acl_mod = (r'(?:tag|ref|reference|attribute|type|xpath)')
+ bin_ops = (r'(?:lt|gt|lte|gte|eq|ne)')
+ val_qual = (r'(?:string|version|number)')
+ rsc_role_action = (r'(?:Master|Started|Slave|Stopped|'
+ r'start|promote|demote|stop)')
+
+ tokens = {
+ 'root': [
+ (r'^#.*\n?', Comment),
+ # attr=value (nvpair)
+ (r'([\w#$-]+)(=)("(?:""|[^"])*"|\S+)',
+ bygroups(Name.Attribute, Punctuation, String)),
+ # need this construct, otherwise numeric node ids
+ # are matched as scores
+ # elem id:
+ (r'(node)(\s+)([\w#$-]+)(:)',
+ bygroups(Keyword, Whitespace, Name, Punctuation)),
+ # scores
+ (r'([+-]?([0-9]+|inf)):', Number),
+ # keywords (elements and other)
+ (elem, Keyword),
+ (sub, Keyword),
+ (acl, Keyword),
+ # binary operators
+ (r'(?:%s:)?(%s)(?![\w#$-])' % (val_qual, bin_ops),
+ Operator.Word),
+ # other operators
+ (bin_rel, Operator.Word),
+ (un_ops, Operator.Word),
+ (date_exp, Operator.Word),
+ # builtin attributes (e.g. #uname)
+ (r'#[a-z]+(?![\w#$-])', Name.Builtin),
+ # acl_mod:blah
+ (r'(%s)(:)("(?:""|[^"])*"|\S+)' % acl_mod,
+ bygroups(Keyword, Punctuation, Name)),
+ # rsc_id[:(role|action)]
+ # NB: this matches all other identifiers
+ (r'([\w#$-]+)(?:(:)(%s))?(?![\w#$-])' % rsc_role_action,
+ bygroups(Name, Punctuation, Operator.Word)),
+ # punctuation
+ (r'(\\(?=\n)|[[\](){}/:@])', Punctuation),
+ (r'\s+|\n', Whitespace),
+ ],
+ }
diff --git a/contrib/setup.py b/contrib/setup.py
new file mode 100644
index 0000000..30330b8
--- /dev/null
+++ b/contrib/setup.py
@@ -0,0 +1,33 @@
+#!/usr/bin/python
+
+from __future__ import unicode_literals
+from setuptools import setup
+
+setup(name='pygments-crmsh-lexers',
+ version='0.0.5',
+ description='Pygments crmsh custom lexers.',
+ keywords='pygments crmsh lexer',
+ license='BSD',
+
+ author='Kristoffer Gronlund',
+ author_email='kgronlund@suse.com',
+
+ url='https://github.com/ClusterLabs/crmsh',
+
+ packages=['pygments_crmsh_lexers'],
+ install_requires=['pygments>=2.0.2'],
+
+ entry_points='''[pygments.lexers]
+ ANSIColorsLexer=pygments_crmsh_lexers:ANSIColorsLexer
+ CrmshLexer=pygments_crmsh_lexers:CrmshLexer''',
+
+ classifiers=[
+ 'Environment :: Plugins',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: BSD License',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 3',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
+ ],)
diff --git a/crmsh.spec.in b/crmsh.spec.in
new file mode 100644
index 0000000..141b143
--- /dev/null
+++ b/crmsh.spec.in
@@ -0,0 +1,289 @@
+#
+# spec file for package crmsh
+#
+# Copyright (c) 2023 SUSE LLC
+#
+# All modifications and additions to the file contributed by third parties
+# remain the property of their copyright owners, unless otherwise agreed
+# upon. The license for this file, and modifications and additions to the
+# file, is the same license as for the pristine package itself (unless the
+# license for the pristine package is not an Open Source License, in which
+# case the license is the MIT License). An "Open Source License" is a
+# license that conforms to the Open Source Definition (Version 1.9)
+# published by the Open Source Initiative.
+
+# Please submit bugfixes or comments via https://bugs.opensuse.org/
+#
+
+
+%bcond_with regression_tests
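+# The regression tests are disabled by default; to enable them at build
+# time, pass the bcond flag to rpmbuild (invocation is illustrative):
+#     rpmbuild -ba crmsh.spec --with regression_tests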
+
+%global gname haclient
+%global uname hacluster
+%global crmsh_docdir %{_defaultdocdir}/%{name}
+
+%global upstream_version tip
+%global upstream_prefix crmsh
+%global crmsh_release 1
+
+%if 0%{?fedora_version} || 0%{?centos_version} || 0%{?rhel_version} || 0%{?rhel} || 0%{?fedora}
+%define pkg_group System Environment/Daemons
+%else
+%define pkg_group Productivity/Clustering/HA
+%endif
+
+Name: crmsh
+Summary: High Availability cluster command-line interface
+License: GPL-2.0-or-later
+Group: %{pkg_group}
+Version: @VERSION@
+Release: 0
+URL: http://crmsh.github.io
+Source0: %{name}-%{version}.tar.bz2
+Source1: %{name}.tmpfiles.d.conf
+
+BuildRoot: %{_tmppath}/%{name}-%{version}-build
+%if 0%{?suse_version}
+# Requiring pacemaker makes crmsh harder to build on other distributions,
+# and is mostly a convenience feature. So only do it for SUSE.
+Requires(pre): pacemaker
+%endif
+Requires: %{name}-scripts >= %{version}-%{release}
+Requires: /usr/bin/which
+Requires: python3 >= 3.4
+Requires: python3-PyYAML
+Requires: python3-lxml
+BuildRequires: python3-lxml
+BuildRequires: python3-pip
+BuildRequires: python3-wheel
+
+%if 0%{?suse_version}
+# only require csync2 on SUSE since bootstrap
+# only works for SUSE at the moment anyway
+Requires: csync2
+%endif
+
+%if 0%{?suse_version}
+# Suse splits this off into a separate package
+Requires: python3-curses
+Requires: python3-python-dateutil
+BuildRequires: fdupes
+BuildRequires: python3-curses
+%else
+Requires: python3-dateutil
+BuildRequires: pyproject-rpm-macros
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+%endif
+
+# Required for core functionality
+BuildRequires: asciidoc
+BuildRequires: autoconf
+BuildRequires: automake
+BuildRequires: pkgconfig
+BuildRequires: python3
+
+%if 0%{?suse_version}
+# xsltproc is necessary for manpage generation; this is split out into
+# libxslt-tools as of openSUSE 12.2. Possibly strictly should be
+# required by asciidoc
+BuildRequires: libxslt-tools
+%endif
+
+%if 0%{?suse_version} || 0%{?fedora_version} || 0%{?centos_version} || 0%{?rhel_version} || 0%{?rhel} || 0%{?fedora}
+BuildArch: noarch
+%endif
+
+%description
+The crm shell is a command-line interface for High-Availability
+cluster management on GNU/Linux systems. It simplifies the
+configuration, management and troubleshooting of Pacemaker-based
+clusters, by providing a powerful and intuitive set of features.
+
+%package test
+Summary: Test package for crmsh
+Group: %{pkg_group}
+Requires: crmsh
+%if %{with regression_tests}
+Requires(post): mailx
+Requires(post): procps
+%if 0%{?suse_version}
+Requires(post): python3-python-dateutil
+%else
+Requires(post): python3-dateutil
+%endif
+Requires(post): python3-tox
+Requires(post): pacemaker
+BuildArch: noarch
+%if 0%{?suse_version}
+Requires(post): libglue-devel
+%else
+Requires(post): cluster-glue-libs-devel
+%endif
+Requires(post): python3-PyYAML
+%endif
+
+%description test
+The crm shell is a command-line interface for High-Availability
+cluster management on GNU/Linux systems. It simplifies the
+configuration, management and troubleshooting of Pacemaker-based
+clusters, by providing a powerful and intuitive set of features.
+This package contains the regression test suite for crmsh.
+
+%package scripts
+Summary: Crm Shell Cluster Scripts
+Group: Productivity/Clustering/HA
+
+%description scripts
+Cluster scripts for crmsh. The cluster scripts can be run
+directly from the crm command line, or used by user interfaces
+like hawk to implement configuration wizards.
+
+%prep
+%setup -q
+
+# replace the shebang in all the scripts
+# with ${_bindir}/python3
+find . -type f -exec sed -i \
+ -e "s|#!/usr/bin/python3?|#!%{__python3}|" \
+ -e "s|#!/usr/bin/env python3?|#!%{__python3}|" \
+ {} \;
+sed -i -e '1{\@^#!%{_bindir}/python3@d}' crmsh/report/core.py
+
+# this is wrong FIXME
+sed -i -e '/data_files/d' setup.py
+
+%build
+./autogen.sh
+
+%{configure} \
+ --sysconfdir=%{_sysconfdir} \
+ --localstatedir=%{_var} \
+ --with-version=%{version} \
+ --docdir=%{crmsh_docdir}
+
+%if 0%{?suse_version}
+%python3_pyproject_wheel
+%else
+%pyproject_buildrequires -t
+%pyproject_wheel
+%endif
+
+# Generate manpages
+for manpg in doc/crm{,sh_crm_report}.8.adoc ; do
+ a2x -f manpage $manpg
+done
+
+for docad in doc/crm{,sh_crm_report}.8.adoc ; do
+ asciidoc --unsafe --backend=xhtml11 $docad
+done
+
+%if %{with regression_tests}
+tox
+if [ $? -ne 0 ]; then
+ echo "Unit tests failed."
+ exit 1
+fi
+%endif
+
+%install
+# make DESTDIR=%%{buildroot} docdir=%%{crmsh_docdir} install
+%if 0%{?suse_version}
+%python3_pyproject_install
+%else
+%pyproject_install
+%endif
+
+# additional directories
+install -d -m0770 %{buildroot}%{_localstatedir}/cache/crm
+install -d -m0770 %{buildroot}%{_localstatedir}/log/crmsh
+install -d -m0755 %{buildroot}%{_tmpfilesdir}
+
+# install configuration
+install -Dm0644 -t %{buildroot}%{_sysconfdir}/crm etc/{crm.conf,profiles.yml}
+install -m0644 %{SOURCE1} %{buildroot}%{_tmpfilesdir}/%{name}.conf
+
+# install manpages
+install -Dpm0644 -t %{buildroot}%{_mandir}/man8 doc/*.8
+install -Dpm0644 -t %{buildroot}%{_datadir}/crmsh/ doc/crm.8.adoc
+
+# install data
+for d in $(cat data-manifest); do
+ if [ -x $d ] ; then mode="0755" ; else mode="0644" ; fi
+ install -D -m $mode $d %{buildroot}%{_datadir}/crmsh/$d
+done
+mv %{buildroot}%{_datadir}/crmsh/test{,s}
+install -p test/testcases/xmlonly.sh \
+ %{buildroot}%{_datadir}/crmsh/tests/testcases/configbasic-xml.filter
+
+install -Dm0644 contrib/bash_completion.sh \
+ %{buildroot}%{_datadir}/bash-completion/completions/crm
+
+if [ -f %{buildroot}%{_bindir}/crm ]; then
+ install -Dm0755 %{buildroot}%{_bindir}/crm %{buildroot}%{_sbindir}/crm
+ rm %{buildroot}%{_bindir}/crm
+fi
+
+%if 0%{?suse_version}
+%fdupes %{buildroot}
+%endif
+
+%post
+%tmpfiles_create %{_tmpfilesdir}/%{name}.conf
+
+%if %{with regression_tests}
+# Run regression tests after installing the package
+# NB: this is called twice by OBS, that's why we touch the file
+%post test
+testfile=`mktemp -t .crmsh_regression_tests_ran_XXXXXX`
+# check if time in file is less than 2 minutes ago
+if [ -e $testfile ] && [ "$(( $(date +%s) - $(cat $testfile) ))" -lt 120 ]; then
+ echo "Skipping regression tests..."
+ exit 0
+fi
+# write current time to file
+rm -f "$testfile"
+echo "$(date +%s)" > "$testfile"
+%{_datadir}/%{name}/tests/regression.sh
+result1=$?
+cd %{_datadir}/%{name}/tests
+./cib-tests.sh
+result2=$?
+[ $result1 -ne 0 ] && (echo "Regression tests failed."; cat ${buildroot}/crmtestout/regression.out)
+[ $result2 -ne 0 ] && echo "CIB tests failed."
+[ $result1 -eq 0 -a $result2 -eq 0 ]
+%endif
+
+%files
+###########################################################
+%defattr(-,root,root)
+
+%{_sbindir}/crm
+%{python3_sitelib}/crmsh*
+
+%{_datadir}/%{name}
+%exclude %{_datadir}/%{name}/tests
+%exclude %{_datadir}/%{name}/scripts
+
+%{_tmpfilesdir}/%{name}.conf
+
+%doc doc/*.html
+%doc COPYING AUTHORS ChangeLog README.md
+%doc contrib/*
+%{_mandir}/man8/*
+
+%config %{_sysconfdir}/crm
+
+%dir %attr (770, %{uname}, %{gname}) %{_var}/cache/crm
+%dir %attr (770, %{uname}, %{gname}) %{_var}/log/crmsh
+%{_datadir}/bash-completion/completions/crm
+
+%files scripts
+%defattr(-,root,root)
+%{_datadir}/%{name}/scripts
+
+%files test
+%defattr(-,root,root)
+%{_datadir}/%{name}/tests
+
+%changelog
diff --git a/crmsh.tmpfiles.d.conf b/crmsh.tmpfiles.d.conf
new file mode 100644
index 0000000..01d991b
--- /dev/null
+++ b/crmsh.tmpfiles.d.conf
@@ -0,0 +1 @@
+d /var/log/crmsh 0775 hacluster haclient -
diff --git a/crmsh/__init__.py b/crmsh/__init__.py
new file mode 100644
index 0000000..feff2bb
--- /dev/null
+++ b/crmsh/__init__.py
@@ -0,0 +1,2 @@
+# This file is required for python packages.
+# It is intentionally empty.
diff --git a/crmsh/bootstrap.py b/crmsh/bootstrap.py
new file mode 100644
index 0000000..a386dfb
--- /dev/null
+++ b/crmsh/bootstrap.py
@@ -0,0 +1,3079 @@
+# Copyright (C) 2016 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+#
+# Bootstrap:
+#
+# Supersedes and replaces both the init/add/remove cluster scripts,
+# and the ha-cluster-bootstrap scripts.
+#
+# Implemented as a straight-forward set of python functions for
+# simplicity and flexibility.
+#
+# TODO: Make csync2 usage optional
+# TODO: Configuration file for bootstrap?
+import codecs
+import os
+import subprocess
+import sys
+import random
+import re
+import tempfile
+import time
+import readline
+import shutil
+import typing
+
+import yaml
+import socket
+from string import Template
+from lxml import etree
+
+from . import config, constants, ssh_key, sh
+from . import upgradeutil
+from . import utils
+from . import xmlutil
+from .cibconfig import mkset_obj, cib_factory
+from . import corosync
+from . import tmpfiles
+from . import lock
+from . import userdir
+from .constants import SSH_OPTION, QDEVICE_HELP_INFO, STONITH_TIMEOUT_DEFAULT,\
+ REJOIN_COUNT, REJOIN_INTERVAL, PCMK_DELAY_MAX, CSYNC2_SERVICE, WAIT_TIMEOUT_MS_DEFAULT
+from . import ocfs2
+from . import qdevice
+from . import parallax
+from . import log
+from .service_manager import ServiceManager
+from .sh import ShellUtils
+from .ui_node import NodeMgmt
+from .user_of_host import UserOfHost, UserNotFoundError
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+
+
+CSYNC2_KEY = "/etc/csync2/key_hagroup"
+CSYNC2_CFG = "/etc/csync2/csync2.cfg"
+COROSYNC_AUTH = "/etc/corosync/authkey"
+CRM_CFG = "/etc/crm/crm.conf"
+PROFILES_FILE = "/etc/crm/profiles.yml"
+SYSCONFIG_SBD = "/etc/sysconfig/sbd"
+SYSCONFIG_PCMK = "/etc/sysconfig/pacemaker"
+SYSCONFIG_NFS = "/etc/sysconfig/nfs"
+SYSCONFIG_FW = "/etc/sysconfig/SuSEfirewall2"
+SYSCONFIG_FW_CLUSTER = "/etc/sysconfig/SuSEfirewall2.d/services/cluster"
+PCMK_REMOTE_AUTH = "/etc/pacemaker/authkey"
+COROSYNC_CONF_ORIG = tmpfiles.create()[1]
+SERVICES_STOP_LIST = ["corosync-qdevice.service", "corosync.service", "hawk.service", CSYNC2_SERVICE]
+WATCHDOG_CFG = "/etc/modules-load.d/watchdog.conf"
+BOOTH_DIR = "/etc/booth"
+BOOTH_CFG = "/etc/booth/booth.conf"
+BOOTH_AUTH = "/etc/booth/authkey"
+SBD_SYSTEMD_DELAY_START_DIR = "/etc/systemd/system/sbd.service.d"
+FILES_TO_SYNC = (BOOTH_DIR, corosync.conf(), COROSYNC_AUTH, CSYNC2_CFG, CSYNC2_KEY, "/etc/ctdb/nodes",
+ "/etc/drbd.conf", "/etc/drbd.d", "/etc/ha.d/ldirectord.cf", "/etc/lvm/lvm.conf", "/etc/multipath.conf",
+ "/etc/samba/smb.conf", SYSCONFIG_NFS, SYSCONFIG_PCMK, SYSCONFIG_SBD, PCMK_REMOTE_AUTH, WATCHDOG_CFG,
+ PROFILES_FILE, CRM_CFG, SBD_SYSTEMD_DELAY_START_DIR)
+INIT_STAGES = ("ssh", "ssh_remote", "csync2", "csync2_remote", "corosync", "remote_auth", "sbd", "cluster", "ocfs2", "admin", "qdevice")
+
+
+class Context(object):
+ """
+ Context object used to avoid having to pass these variables
+ to every bootstrap method.
+ """
+ DEFAULT_PROFILE_NAME = "default"
+ S390_PROFILE_NAME = "s390"
+
+ def __init__(self):
+ '''
+ Initialize attributes
+ '''
+ self.type = None # init or join
+ self.quiet = None
+ self.yes_to_all = None
+ self.cluster_name = None
+ self.watchdog = None
+ self.no_overwrite_sshkey = None
+ self.nic_list = []
+ self.user_at_node_list = []
+ self.node_list_in_cluster = []
+ self.current_user = None
+ self.unicast = None
+ self.multicast = None
+ self.admin_ip = None
+ self.second_heartbeat = None
+ self.ipv6 = None
+ self.qdevice_inst = None
+ self.qnetd_addr = None
+ self.qdevice_port = None
+ self.qdevice_algo = None
+ self.qdevice_tie_breaker = None
+ self.qdevice_tls = None
+ self.qdevice_heuristics = None
+ self.qdevice_heuristics_mode = None
+ self.qdevice_rm_flag = None
+ self.ocfs2_devices = []
+ self.use_cluster_lvm2 = None
+ self.mount_point = None
+ self.cluster_node = None
+ self.force = None
+ self.arbitrator = None
+ self.clusters = None
+ self.tickets = None
+ self.sbd_manager = None
+ self.sbd_devices = []
+ self.diskless_sbd = None
+ self.stage = None
+ self.args = None
+ self.ui_context = None
+ self.interfaces_inst = None
+ self.with_other_user = True
+ self.cluster_is_running = None
+ self.cloud_type = None
+ self.is_s390 = False
+ self.profiles_data = None
+ self.skip_csync2 = None
+ self.profiles_dict = {}
+ self.default_nic_list = []
+ self.default_ip_list = []
+ self.local_ip_list = []
+ self.local_network_list = []
+ self.rm_list = [SYSCONFIG_SBD, CSYNC2_CFG, corosync.conf(), CSYNC2_KEY,
+ COROSYNC_AUTH, "/var/lib/heartbeat/crm/*", "/var/lib/pacemaker/cib/*",
+ "/var/lib/corosync/*", "/var/lib/pacemaker/pengine/*", PCMK_REMOTE_AUTH,
+ "/var/lib/csync2/*", "~/.config/crm/*"]
+ self.use_ssh_agent = False
+
+ @classmethod
+ def set_context(cls, options):
+ ctx = cls()
+ for opt in vars(options):
+ setattr(ctx, opt, getattr(options, opt))
+ ctx.initialize_user()
+ return ctx
+
+ def initialize_qdevice(self):
+ """
+ Initialize qdevice instance
+ """
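+        # qnetd_addr accepts "host" or "user@host", e.g. "alice@qnetd-node" (name illustrative)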
+ if not self.qnetd_addr:
+ return
+        parts = self.qnetd_addr.split('@', 1)
+ if len(parts) == 2:
+ ssh_user = parts[0]
+ qnetd_host = parts[1]
+ else:
+ ssh_user = None
+ qnetd_host = self.qnetd_addr
+ self.qdevice_inst = qdevice.QDevice(
+ qnetd_host,
+ port=self.qdevice_port,
+ algo=self.qdevice_algo,
+ tie_breaker=self.qdevice_tie_breaker,
+ tls=self.qdevice_tls,
+ ssh_user=ssh_user,
+ cmds=self.qdevice_heuristics,
+ mode=self.qdevice_heuristics_mode,
+ is_stage=self.stage == "qdevice")
+
+ def initialize_user(self):
+ """
+ users_of_specified_hosts: 'not_specified', 'specified', 'no_hosts'
+ """
+ if self.cluster_node is None and self.user_at_node_list is None:
+ users_of_specified_hosts = 'no_hosts'
+ elif self.cluster_node is not None and '@' in self.cluster_node:
+ users_of_specified_hosts = 'specified'
+ elif self.user_at_node_list is not None and any('@' in x for x in self.user_at_node_list):
+ users_of_specified_hosts = 'specified'
+ else:
+ users_of_specified_hosts = 'not_specified'
+ sudoer = userdir.get_sudoer()
+ has_sudoer = sudoer is not None
+ if users_of_specified_hosts == 'specified':
+ if has_sudoer:
+ self.current_user = sudoer
+ else:
+                utils.fatal("Unsupported config: the local node is using root while remote nodes are using non-root users.")
+ elif users_of_specified_hosts == 'not_specified':
+ assert userdir.getuser() == 'root'
+ self.current_user = 'root'
+ elif users_of_specified_hosts == 'no_hosts':
+ assert userdir.getuser() == 'root'
+ self.current_user = 'root'
+ else:
+ raise AssertionError('Bad parameter user_of_specified_hosts: {}'.format(users_of_specified_hosts))
+
+ def _validate_sbd_option(self):
+ """
+ Validate sbd options
+ """
+ if self.sbd_devices and self.diskless_sbd:
+ utils.fatal("Can't use -s and -S options together")
+ if self.stage == "sbd":
+ if not self.sbd_devices and not self.diskless_sbd and self.yes_to_all:
+                utils.fatal("Stage sbd requires an sbd device specified with -s, or diskless sbd with the -S option")
+ if ServiceManager().service_is_active("sbd.service") and not config.core.force:
+ utils.fatal("Can't configure stage sbd: sbd.service already running! Please use crm option '-F' if need to redeploy")
+ if self.cluster_is_running:
+ utils.check_all_nodes_reachable()
+
+ def _validate_nodes_option(self):
+ """
+ Validate -N/--nodes option
+ """
+ if self.user_at_node_list and self.stage:
+ utils.fatal("Can't use -N/--nodes option and stage({}) together".format(self.stage))
+ me = utils.this_node()
+ was_localhost_already = False
+ li = [utils.parse_user_at_host(x) for x in self.user_at_node_list]
+ for user in (user for user, node in li if node == me and user is not None and user != self.current_user):
+ utils.fatal(f"Overriding current user '{self.current_user}' by '{user}'. Ouch, don't do it.")
+ self.user_at_node_list = [value for (user, node), value in zip(li, self.user_at_node_list) if node != me]
+ for user, node in (utils.parse_user_at_host(x) for x in self.user_at_node_list):
+ utils.ping_node(node)
+
+ def _validate_cluster_node(self):
+ """
+ Validate cluster_node on join side
+ """
+ if self.cluster_node and self.type == 'join':
+ user, node = _parse_user_at_host(self.cluster_node, None)
+ try:
+ # self.cluster_node might be hostname or IP address
+ ip_addr = socket.gethostbyname(node)
+ if utils.InterfacesInfo.ip_in_local(ip_addr):
+ utils.fatal("Please specify peer node's hostname or IP address")
+ except socket.gaierror as err:
+ utils.fatal("\"{}\": {}".format(node, err))
+
+ def validate_option(self):
+ """
+ Validate options
+ """
+ if self.admin_ip:
+ Validation.valid_admin_ip(self.admin_ip)
+ if self.qdevice_inst:
+ self.qdevice_inst.valid_qdevice_options()
+ if self.nic_list:
+ if len(self.nic_list) > 2:
+                utils.fatal("Maximum number of interfaces is 2")
+ if self.no_overwrite_sshkey:
+ logger.warning("--no-overwrite-sshkey option is deprecated since crmsh does not overwrite ssh keys by default anymore and will be removed in future versions")
+ if self.type == "join" and self.watchdog:
+ logger.warning("-w option is deprecated and will be removed in future versions")
+ if self.ocfs2_devices or self.stage == "ocfs2":
+ ocfs2.OCFS2Manager.verify_ocfs2(self)
+ if not self.skip_csync2 and self.type == "init":
+ self.skip_csync2 = utils.get_boolean(os.getenv("SKIP_CSYNC2_SYNC"))
+ if self.skip_csync2 and self.stage:
+ utils.fatal("-x option or SKIP_CSYNC2_SYNC can't be used with any stage")
+ self._validate_cluster_node()
+ self._validate_nodes_option()
+ self._validate_sbd_option()
+
+ def init_sbd_manager(self):
+ from .sbd import SBDManager
+ self.sbd_manager = SBDManager(self)
+
+ def detect_platform(self):
+ """
+ Detect platform
+ Return profile type for different platform
+ """
+ profile_type = None
+
+ self.is_s390 = "390" in os.uname().machine
+ if self.is_s390:
+ profile_type = self.S390_PROFILE_NAME
+ else:
+ self.cloud_type = utils.detect_cloud()
+ if self.cloud_type:
+ profile_type = self.cloud_type
+
+ if profile_type:
+ logger.info("Detected \"{}\" platform".format(profile_type))
+ return profile_type
+
+ def load_specific_profile(self, profile_type):
+ """
+ Load specific profile
+ """
+ profile_dict = {}
+ if not profile_type:
+ return profile_dict
+
+ if profile_type in self.profiles_data:
+ logger.info("Loading \"{}\" profile from {}".format(profile_type, PROFILES_FILE))
+ profile_dict = self.profiles_data[profile_type]
+ else:
+ logger.info("\"{}\" profile does not exist in {}".format(profile_type, PROFILES_FILE))
+ return profile_dict
+
+ def load_profiles(self):
+ """
+ Load profiles data for different environment
+ """
+ profile_type = self.detect_platform()
+
+ if not os.path.exists(PROFILES_FILE):
+ return
+ with open(PROFILES_FILE) as f:
+ self.profiles_data = yaml.load(f, Loader=yaml.SafeLoader)
+ # empty file
+ if not self.profiles_data:
+ return
+
+ default_profile_dict = self.load_specific_profile(self.DEFAULT_PROFILE_NAME)
+ specific_profile_dict = self.load_specific_profile(profile_type)
+        # merge the two dictionaries: values from the specific profile override the defaults
+ self.profiles_dict = {**default_profile_dict, **specific_profile_dict}
+
+
+_context: typing.Optional[Context] = None
+
+
+def drop_last_history():
+ hlen = readline.get_current_history_length()
+ if hlen > 0:
+ readline.remove_history_item(hlen - 1)
+
+
+def prompt_for_string(msg, match=None, default='', valid_func=None, prev_value=[], allow_empty=False):
+ if _context.yes_to_all:
+ return default
+
+ while True:
+ disable_completion()
+ val = logger_utils.wait_input("{} [{}]".format(msg, default), default)
+ enable_completion()
+ if val:
+ drop_last_history()
+ elif allow_empty:
+ return None
+ else:
+ continue
+ if not match and not valid_func:
+ return val
+ if match and not re.match(match, val):
+ logger.error("Invalid value entered")
+ continue
+ if valid_func:
+ try:
+ if prev_value:
+ valid_func(val, prev_value)
+ else:
+ valid_func(val)
+ except ValueError as err:
+ logger.error(err)
+ continue
+
+ return val
+
+
+def confirm(msg):
+ if _context.yes_to_all:
+ return True
+ disable_completion()
+ rc = logger_utils.confirm(msg)
+ enable_completion()
+ drop_last_history()
+ return rc
+
+
+def disable_completion():
+ if _context.ui_context:
+ _context.ui_context.disable_completion()
+
+
+def enable_completion():
+ if _context.ui_context:
+ _context.ui_context.setup_readline()
+
+
+def invoke(*args):
+ """
+ Log command execution to log file.
+ Log output from command to log file.
+ Return (boolean, stdout, stderr)
+ """
+ logger_utils.log_only_to_file("invoke: " + " ".join(args))
+ rc, stdout, stderr = ShellUtils().get_stdout_stderr(" ".join(args))
+ if stdout:
+ logger_utils.log_only_to_file("stdout: {}".format(stdout))
+ if stderr:
+ logger_utils.log_only_to_file("stderr: {}".format(stderr))
+ return rc == 0, stdout, stderr
+
+
+def invokerc(*args):
+ """
+ Calling invoke, return True/False
+ """
+ rc, _, _ = invoke(*args)
+ return rc
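+# Illustrative usage (the command shown is an example only):
+#   ok = invokerc("systemctl", "is-active", "corosync")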
+
+
+def crm_configure_load(action, configuration):
+ action_types = ("update", "replace", "push")
+ if action not in action_types:
+ utils.fatal(f"Action type should be: {action_types}")
+ logger_utils.log_only_to_file("Loading crm config (%s), content is:" % (action))
+ logger_utils.log_only_to_file(configuration)
+
+ configuration_tmpfile = utils.str2tmp(configuration)
+ tmpfiles.add(configuration_tmpfile)
+ sh.cluster_shell().get_stdout_or_raise_error(f"crm -F configure load {action} {configuration_tmpfile}")
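+# Illustrative usage (the configuration snippet is an example only):
+#   crm_configure_load("update", "property cib-bootstrap-options: stonith-enabled=true")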
+
+
+def wait_for_resource(message, resource, timeout_ms=WAIT_TIMEOUT_MS_DEFAULT):
+ """
+    Wait for a resource to start
+ """
+ with logger_utils.status_long(message) as progress_bar:
+ start_time = int(time.clock_gettime(time.CLOCK_MONOTONIC) * 1000)
+ while True:
+ if xmlutil.CrmMonXmlParser().is_resource_started(resource):
+ break
+ status_progress(progress_bar)
+ if 0 < timeout_ms <= (int(time.clock_gettime(time.CLOCK_MONOTONIC) * 1000) - start_time):
+ utils.fatal('Time out waiting for resource.')
+ sleep(1)
+
+
+def wait_for_cluster(timeout_ms=WAIT_TIMEOUT_MS_DEFAULT):
+ with logger_utils.status_long("Waiting for cluster") as progress_bar:
+ start_time = int(time.clock_gettime(time.CLOCK_MONOTONIC) * 1000)
+ while True:
+ if is_online():
+ break
+ status_progress(progress_bar)
+ if 0 < timeout_ms <= (int(time.clock_gettime(time.CLOCK_MONOTONIC) * 1000) - start_time):
+ utils.fatal('Time out waiting for cluster.')
+ sleep(2)
+
+
+def get_node_canonical_hostname(host: str) -> str:
+ """
+ Get the canonical hostname of the cluster node
+ """
+ rc, out, err = sh.cluster_shell().get_rc_stdout_stderr_without_input(host, 'crm_node --name')
+ if rc != 0:
+ utils.fatal(err)
+ return out
+
+
+def is_online():
+ """
+ Check whether local node is online
+ Besides that, in join process, check whether init node is online
+ """
+ if not xmlutil.CrmMonXmlParser().is_node_online(utils.this_node()):
+ return False
+
+    # if _context.cluster_node is None, this is the init process
+ if _context.cluster_node is None:
+ return True
+ # In join process
+ # If the joining node is already online but can't find the init node
+    # the communication IP may be misconfigured
+ user, cluster_node = _parse_user_at_host(_context.cluster_node, None)
+ cluster_node = get_node_canonical_hostname(cluster_node)
+ if not xmlutil.CrmMonXmlParser().is_node_online(cluster_node):
+ shutil.copy(COROSYNC_CONF_ORIG, corosync.conf())
+ sync_file(corosync.conf())
+ ServiceManager(sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).stop_service("corosync")
+ print()
+ utils.fatal("Cannot see peer node \"{}\", please check the communication IP".format(cluster_node))
+ return True
+
+
+def pick_default_value(default_list, prev_list):
+ """
+ Provide default value for function 'prompt_for_string'.
+    Make sure to give a different default value in multi-ring mode.
+
+ Parameters:
+ * default_list - default value list for config item
+ * prev_list - previous value for config item in multi-ring mode
+ """
+ for value in default_list:
+ if value not in prev_list:
+ return value
+ return ""
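+    # e.g. pick_default_value(["10.10.10.2", "10.10.10.3"], ["10.10.10.2"]) -> "10.10.10.3" (addresses illustrative)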
+
+
+def sleep(t):
+ """
+ Sleep for t seconds.
+ """
+ t = float(t)
+ time.sleep(t)
+
+
+def status_progress(progress_bar):
+ if not _context or not _context.quiet:
+ progress_bar.progress()
+
+
+def partprobe():
+ # This function uses fdisk to create a list of valid devices for probing
+ # with partprobe. This prevents partprobe from failing on read-only mounted
+ # devices such as /dev/sr0 (etc) that might cause it to return an error when
+ # it exits. This allows partprobe to run without forcing _die to bail out.
+ # -Brandon Heaton
+ # ATT Training Engineer
+ # Data Center Engineer
+ # bheaton@suse.com
+ _rc, out, _err = ShellUtils().get_stdout_stderr("sfdisk -l")
+ disks = re.findall(r'^Disk\s*(/.+):', out, re.M)
+ invoke("partprobe", *disks)
+
+
+def probe_partitions():
+ # Need to do this if second (or subsequent) node happens to be up and
+ # connected to storage while it's being repartitioned on the first node.
+ with logger_utils.status_long("Probing for new partitions"):
+ partprobe()
+ sleep(5)
+
+
+def check_tty():
+ """
+ Check for pseudo-tty: Cannot display read prompts without a TTY (bnc#892702)
+ """
+ if _context.yes_to_all:
+ return
+ if not sys.stdin.isatty():
+ utils.fatal("No pseudo-tty detected! Use -t option to ssh if calling remotely.")
+
+
+def my_hostname_resolves():
+ import socket
+ hostname = utils.this_node()
+ try:
+ socket.gethostbyname(hostname)
+ return True
+ except socket.error:
+ return False
+
+
+def check_prereqs(stage):
+ warned = False
+
+ if not my_hostname_resolves():
+ logger.warning("Hostname '{}' is unresolvable. {}".format(
+ utils.this_node(),
+ "Please add an entry to /etc/hosts or configure DNS."))
+ warned = True
+
+ timekeepers = ('chronyd.service', 'ntp.service', 'ntpd.service')
+ timekeeper = None
+ service_manager = ServiceManager()
+ for tk in timekeepers:
+ if service_manager.service_is_available(tk):
+ timekeeper = tk
+ break
+
+ if timekeeper is None:
+ logger.warning("No NTP service found.")
+ warned = True
+ elif not service_manager.service_is_enabled(timekeeper):
+ logger.warning("{} is not configured to start at system boot.".format(timekeeper))
+ warned = True
+
+ if warned:
+ if not confirm("Do you want to continue anyway?"):
+ return False
+
+ firewall_open_basic_ports()
+ return True
+
+
+def log_start():
+ """
+ Convenient side-effect: this will die immediately if the log file
+ is not writable (e.g. if not running as root)
+ """
+ logger_utils.log_only_to_file('================================================================')
+ logger_utils.log_only_to_file(" ".join(sys.argv))
+ logger_utils.log_only_to_file('----------------------------------------------------------------')
+
+
+def init_network():
+ """
+ Get all needed network information through utils.InterfacesInfo
+ """
+ interfaces_inst = utils.InterfacesInfo(_context.ipv6, _context.second_heartbeat, _context.nic_list)
+ interfaces_inst.get_interfaces_info()
+ _context.default_nic_list = interfaces_inst.get_default_nic_list_from_route()
+ _context.default_ip_list = interfaces_inst.get_default_ip_list()
+
+ # local_ip_list and local_network_list are for validation
+ _context.local_ip_list = interfaces_inst.ip_list
+ _context.local_network_list = interfaces_inst.network_list
+ _context.interfaces_inst = interfaces_inst
+    # using two "-i" options is equivalent to using the "-M" option
+ if len(_context.default_nic_list) == 2 and not _context.second_heartbeat:
+ _context.second_heartbeat = True
+
+
+def configure_firewall(tcp=None, udp=None):
+ if tcp is None:
+ tcp = []
+ if udp is None:
+ udp = []
+
+ def init_firewall_suse(tcp, udp):
+ if os.path.exists(SYSCONFIG_FW_CLUSTER):
+ cluster = utils.parse_sysconfig(SYSCONFIG_FW_CLUSTER)
+ tcpcurr = set(cluster.get("TCP", "").split())
+ tcpcurr.update(tcp)
+ tcp = list(tcpcurr)
+ udpcurr = set(cluster.get("UDP", "").split())
+ udpcurr.update(udp)
+ udp = list(udpcurr)
+
+ utils.sysconfig_set(SYSCONFIG_FW_CLUSTER, TCP=" ".join(tcp), UDP=" ".join(udp))
+
+ ext = ""
+ if os.path.exists(SYSCONFIG_FW):
+ fw = utils.parse_sysconfig(SYSCONFIG_FW)
+ ext = fw.get("FW_CONFIGURATIONS_EXT", "")
+ if "cluster" not in ext.split():
+ ext = ext + " cluster"
+ utils.sysconfig_set(SYSCONFIG_FW, FW_CONFIGURATIONS_EXT=ext)
+
+ # No need to do anything else if the firewall is inactive
+ if not ServiceManager().service_is_active("SuSEfirewall2"):
+ return
+
+ # Firewall is active, either restart or complain if we couldn't tweak it
+ logger.info("Restarting firewall (tcp={}, udp={})".format(" ".join(tcp), " ".join(udp)))
+ if not invokerc("rcSuSEfirewall2 restart"):
+ utils.fatal("Failed to restart firewall (SuSEfirewall2)")
+
+ def init_firewall_firewalld(tcp, udp):
+ has_firewalld = ServiceManager().service_is_active("firewalld")
+ cmdbase = 'firewall-cmd --zone=public --permanent ' if has_firewalld else 'firewall-offline-cmd --zone=public '
+
+ def cmd(args):
+ if not invokerc(cmdbase + args):
+ utils.fatal("Failed to configure firewall.")
+
+ for p in tcp:
+ cmd("--add-port={}/tcp".format(p))
+
+ for p in udp:
+ cmd("--add-port={}/udp".format(p))
+
+ if has_firewalld:
+ if not invokerc("firewall-cmd --reload"):
+ utils.fatal("Failed to reload firewall configuration.")
+
+ def init_firewall_ufw(tcp, udp):
+ """
+ try configuring firewall with ufw
+ """
+ for p in tcp:
+ if not invokerc("ufw allow {}/tcp".format(p)):
+ utils.fatal("Failed to configure firewall (ufw)")
+ for p in udp:
+ if not invokerc("ufw allow {}/udp".format(p)):
+ utils.fatal("Failed to configure firewall (ufw)")
+
+ if utils.package_is_installed("firewalld"):
+ init_firewall_firewalld(tcp, udp)
+ elif utils.package_is_installed("SuSEfirewall2"):
+ init_firewall_suse(tcp, udp)
+ elif utils.package_is_installed("ufw"):
+ init_firewall_ufw(tcp, udp)
+
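+# Illustrative sketch (not part of the control flow): assuming firewalld is
+# installed and active, configure_firewall(tcp=["5405"], udp=["5404"])
+# roughly amounts to:
+#   firewall-cmd --zone=public --permanent --add-port=5405/tcp
+#   firewall-cmd --zone=public --permanent --add-port=5404/udp
+#   firewall-cmd --reload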
+
+def firewall_open_basic_ports():
+ """
+ Open ports for csync2, hawk & dlm respectively
+ """
+ configure_firewall(tcp=["30865", "7630", "21064"])
+
+
+def firewall_open_corosync_ports():
+ """
+ Have to do this separately, as we need general firewall config early
+ so csync2 works, but need corosync config *after* corosync.conf has
+ been created/updated.
+
+ Note that corosync uses two UDP ports: mcastport (for mcast
+ receives) and mcastport - 1 (for mcast sends).
+
+ Also open QNetd/QDevice port if configured.
+ """
+ # all mcastports defined in corosync config
+ udp = corosync.get_values("totem.interface.mcastport")
+ udp.extend([str(int(p) - 1) for p in udp])
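+ # e.g. mcastports ["5405", "5407"] yield udp = ["5405", "5407", "5404", "5406"]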
+
+ tcp = corosync.get_values("totem.quorum.device.net.port")
+
+ configure_firewall(tcp=tcp, udp=udp)
+
+
+def init_cluster_local():
+ # Caller should check this, but I'm paranoid...
+ if ServiceManager().service_is_active("corosync.service"):
+ utils.fatal("corosync service is running!")
+
+ firewall_open_corosync_ports()
+
+ # reset password, but only if it's not already set
+ # (We still need the hacluster user for hawk.)
+ _rc, outp = ShellUtils().get_stdout("passwd -S hacluster")
+ ps = outp.strip().split()[1]
+ pass_msg = ""
+ if ps not in ("P", "PS"):
+ logger_utils.log_only_to_file(': Resetting password of hacluster user')
+ rc, outp, errp = ShellUtils().get_stdout_stderr("passwd hacluster", input_s=b"linux\nlinux\n")
+ if rc != 0:
+ logger.warning("Failed to reset password of hacluster user: %s" % (outp + errp))
+ else:
+ pass_msg = ", password 'linux'"
+
+ # evil, but necessary
+ invoke("rm -f /var/lib/heartbeat/crm/* /var/lib/pacemaker/cib/*")
+
+ # only try to start hawk if hawk is installed
+ service_manager = ServiceManager()
+ if service_manager.service_is_available("hawk.service"):
+ service_manager.start_service("hawk.service", enable=True)
+ logger.info("Hawk cluster interface is now running. To see cluster status, open:")
+ logger.info(" https://{}:7630/".format(_context.default_ip_list[0]))
+ logger.info("Log in with username 'hacluster'{}".format(pass_msg))
+ else:
+ logger.warning("Hawk not installed - not configuring web management interface.")
+
+ if pass_msg:
+ logger.warning("You should change the hacluster password to something more secure!")
+
+ start_pacemaker(enable_flag=True)
+ wait_for_cluster()
+
+
+def start_pacemaker(node_list=[], enable_flag=False):
+ """
+ Start pacemaker service with wait time for sbd
+ When node_list set, start pacemaker service in parallel
+
+ Return success node list
+ """
+ from .sbd import SBDTimeout
+ # _context is unset when we are not in the init or join process
+ if not _context and \
+ utils.package_is_installed("sbd") and \
+ ServiceManager().service_is_enabled("sbd.service") and \
+ SBDTimeout.is_sbd_delay_start():
+ target_dir = "/run/systemd/system/sbd.service.d/"
+ cmd1 = "mkdir -p {}".format(target_dir)
+ target_file = "{}sbd_delay_start_disabled.conf".format(target_dir)
+ cmd2 = "echo -e '[Service]\nUnsetEnvironment=SBD_DELAY_START' > {}".format(target_file)
+ cmd3 = "systemctl daemon-reload"
+ for cmd in [cmd1, cmd2, cmd3]:
+ parallax.parallax_call(node_list, cmd)
+
+ # To avoid possible JOIN flood in corosync
+ service_manager = ServiceManager()
+ if len(node_list) > 5:
+ for node in node_list[:]:
+ time.sleep(0.25)
+ try:
+ service_manager.start_service("corosync.service", remote_addr=node)
+ except ValueError as err:
+ node_list.remove(node)
+ logger.error(err)
+ return service_manager.start_service("pacemaker.service", enable=enable_flag, node_list=node_list)
+
+
+def append(fromfile, tofile, remote=None):
+ cmd = "cat {} >> {}".format(fromfile, tofile)
+ sh.cluster_shell().get_stdout_or_raise_error(cmd, host=remote)
+
+
+def append_unique(fromfile, tofile, user=None, remote=None, from_local=False):
+ """
+ Append unique content from fromfile to tofile
+
+ if from_local and remote:
+ append local fromfile to remote tofile
+ elif remote:
+ append remote fromfile to remote tofile
+ if not remote:
+ append fromfile to tofile, locally
+ """
+ if not utils.check_file_content_included(fromfile, tofile, remote=remote, source_local=from_local):
+ if from_local and remote:
+ append_to_remote_file(fromfile, user, remote, tofile)
+ else:
+ append(fromfile, tofile, remote=remote)
+
+
+def _parse_user_at_host(s: str, default_user: str) -> typing.Tuple[str, str]:
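+ # For example (hypothetical values): _parse_user_at_host("alice@node1", "root")
+ # returns ("alice", "node1"), while _parse_user_at_host("node1", "root")
+ # returns ("root", "node1").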
+ user, host = utils.parse_user_at_host(s)
+ if user is None:
+ user = default_user
+ return user, host
+
+
+def init_ssh():
+ user_host_list = [_parse_user_at_host(x, _context.current_user) for x in _context.user_at_node_list]
+ if _context.use_ssh_agent:
+ try:
+ ssh_agent = ssh_key.AgentClient()
+ keys = ssh_agent.list()
+ logger.info("Using public keys from ssh-agent...")
+ except ssh_key.Error:
+ logger.error("Cannot get a public key from ssh-agent.")
+ raise
+ else:
+ keys = list()
+ init_ssh_impl(_context.current_user, keys, user_host_list)
+ if user_host_list:
+ service_manager = ServiceManager()
+ for user, node in user_host_list:
+ if service_manager.service_is_active("pacemaker.service", remote_addr=node):
+ utils.fatal("Cluster is currently active on {} - can't run".format(node))
+
+
+def init_ssh_impl(local_user: str, ssh_public_keys: typing.List[ssh_key.Key], user_node_list: typing.List[typing.Tuple[str, str]]):
+ """ Configure passwordless SSH.
+
+ The local_user on the local host will be configured.
+ If user_node_list is not empty, those users and hosts will also be configured.
+ If ssh_public_keys is not empty, they are added to authorized_keys; otherwise, a new key pair is generated for each node.
+ """
+ ServiceManager(sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).start_service("sshd.service", enable=True)
+ if ssh_public_keys:
+ local_shell = sh.LocalShell(additional_environ={'SSH_AUTH_SOCK': os.environ.get('SSH_AUTH_SOCK')})
+ else:
+ local_shell = sh.LocalShell()
+ shell = sh.SSHShell(local_shell, local_user)
+ authorized_key_manager = ssh_key.AuthorizedKeyManager(shell)
+ if ssh_public_keys:
+ # Use specified key. Do not generate new ones.
+ logger.info("Adding public keys to authorized_keys for user %s...", local_user)
+ for key in ssh_public_keys:
+ authorized_key_manager.add(None, local_user, key)
+ else:
+ configure_ssh_key(local_user)
+ configure_ssh_key('hacluster')
+ change_user_shell('hacluster')
+
+ user_by_host = utils.HostUserConfig()
+ user_by_host.set_no_generating_ssh_key(bool(ssh_public_keys))
+ if user_node_list:
+ print()
+ if ssh_public_keys:
+ for user, node in user_node_list:
+ logger.info("Adding public keys to authorized_keys on %s@%s", user, node)
+ for key in ssh_public_keys:
+ authorized_key_manager.add(node, local_user, key)
+ if user != 'root' and 0 != shell.subprocess_run_without_input(
+ node, user, 'sudo true',
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ ).returncode:
+ raise ValueError(f'Failed to sudo on {user}@{node}')
+ else:
+ _init_ssh_on_remote_nodes(local_user, user_node_list)
+ for user, node in user_node_list:
+ user_by_host.add(user, node)
+ user_by_host.add(local_user, utils.this_node())
+ user_by_host.save_local()
+ # Starting from here, ClusterShell is available
+ shell = sh.ClusterShell(local_shell, UserOfHost.instance())
+ authorized_key_manager = ssh_key.AuthorizedKeyManager(shell)
+ _init_ssh_for_secondary_user_on_remote_nodes(
+ shell, authorized_key_manager,
+ [node for user, node in user_node_list],
+ 'hacluster',
+ )
+ for user, node in user_node_list:
+ change_user_shell('hacluster', node)
+ user_by_host.save_remote([node for user, node in user_node_list])
+
+
+def _init_ssh_on_remote_nodes(
+ local_user: str,
+ user_node_list: typing.List[typing.Tuple[str, str]],
+):
+ # Swap public ssh key between remote node and local
+ public_key_list = list()
+ for i, (remote_user, node) in enumerate(user_node_list):
+ utils.ssh_copy_id(local_user, remote_user, node)
+ # After this, login to remote_node is passwordless
+ public_key_list.append(swap_public_ssh_key(node, local_user, remote_user, local_user, remote_user, add=True))
+ if len(user_node_list) > 1:
+ shell = sh.LocalShell()
+ shell_script = _merge_authorized_keys(public_key_list)
+ for i, (remote_user, node) in enumerate(user_node_list):
+ result = shell.su_subprocess_run(
+ local_user,
+ 'ssh {} {}@{} /bin/sh'.format(constants.SSH_OPTION, remote_user, node),
+ input=shell_script,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+ if result.returncode != 0:
+ utils.fatal('Failed to add public keys to {}@{}: {}'.format(remote_user, node, result.stdout))
+
+
+def _init_ssh_for_secondary_user_on_remote_nodes(
+ cluster_shell: sh.ClusterShell,
+ authorized_key_manager: ssh_key.AuthorizedKeyManager,
+ nodes: typing.Iterable[str],
+ user: str,
+):
+ """Initialize ssh for another user via an already working ClusterShell."""
+ key_file_manager = ssh_key.KeyFileManager(cluster_shell)
+ local_keys = [ssh_key.KeyFile(path) for path in key_file_manager.list_public_key_for_user(None, user)]
+ assert local_keys
+ for node in nodes:
+ if not sh.SSHShell(cluster_shell.local_shell, user).can_run_as(node, user):
+ for key in local_keys:
+ authorized_key_manager.add(node, user, key)
+ is_generated, remote_keys = key_file_manager.ensure_key_pair_exists_for_user(node, user)
+ if is_generated:
+ logger.info("A new ssh keypair is generated for user %s@%s.", user, node)
+ for key in remote_keys:
+ authorized_key_manager.add(None, user, key)
+
+
+def _merge_authorized_keys(keys: typing.List[str]) -> bytes:
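+ # Builds a small shell script that appends each key to ~/.ssh/authorized_keys
+ # unless it is already present; e.g. keys=['ssh-rsa AAA... alice@node1']
+ # (hypothetical key) first emits keys+=('ssh-rsa AAA... alice@node1'),
+ # followed by the loop below.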
+ shell_script = '''for key in "${keys[@]}"; do
+ grep -F "$key" ~/.ssh/authorized_keys > /dev/null || sed -i "\\$a $key" ~/.ssh/authorized_keys
+ done'''
+ keys_definition = ("keys+=('{}')\n".format(key) for key in keys)
+ buf = bytearray()
+ for item in keys_definition:
+ buf.extend(item.encode('utf-8'))
+ buf.extend(shell_script.encode('utf-8'))
+ return buf
+
+
+def _fetch_core_hosts(shell: sh.ClusterShell, remote_host) -> typing.Tuple[typing.List[str], typing.List[str]]:
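+ # e.g. output "core.hosts = alice@node1, bob@node2" (hypothetical users/hosts)
+ # parses to (['alice', 'bob'], ['node1', 'node2'])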
+ cmd = 'crm options show core.hosts'
+ text = shell.get_stdout_or_raise_error(cmd, remote_host)
+ match = re.match('core\\.hosts\\s*=\\s*(.*)\\s*', text)
+ if match is None:
+ utils.fatal('Malformed core.hosts from host {}: {}'.format(remote_host, text))
+ user_list = list()
+ host_list = list()
+ for item in re.split(',\\s*', match.group(1)):
+ part = item.split('@', 2)
+ if len(part) != 2:
+ utils.fatal('Malformed core.hosts from host {}: {}'.format(remote_host, text))
+ user_list.append(part[0])
+ host_list.append(part[1])
+ return user_list, host_list
+
+
+def key_files(user):
+ """
+ Find home directory for user and return key files with abspath
+ """
+ keyfile_dict = {}
+ home_dir = userdir.gethomedir(user)
+ keyfile_dict['private'] = "{}/.ssh/id_rsa".format(home_dir)
+ keyfile_dict['public'] = "{}/.ssh/id_rsa.pub".format(home_dir)
+ keyfile_dict['authorized'] = "{}/.ssh/authorized_keys".format(home_dir)
+ return keyfile_dict
+
+
+def is_nologin(user, remote=None):
+ """
+ Check if user's shell is nologin
+ """
+ passwd_file = "/etc/passwd"
+ pattern = f"{user}:.*:/.*/nologin"
+ if remote:
+ cmd = f"cat {passwd_file}|grep {pattern}"
+ rc, _, _ = sh.cluster_shell().get_rc_stdout_stderr_without_input(remote, cmd)
+ return rc == 0
+ else:
+ with open(passwd_file) as f:
+ return re.search(pattern, f.read()) is not None
+
+
+def change_user_shell(user, remote=None):
+ """
+ To change user's login shell
+ """
+ user_msg = f"'{user}' on {remote}" if remote else f"'{user}'"
+ message = f"The user {user_msg} will have the login shell configuration changed to /bin/bash"
+ if user != "root" and is_nologin(user, remote):
+ if _context is not None and not _context.yes_to_all:
+ logger.info(message)
+ if not confirm("Continue?"):
+ _context.with_other_user = False
+ return
+ cmd = f"usermod -s /bin/bash {user}"
+ sh.cluster_shell().get_stdout_or_raise_error(cmd, remote)
+
+
+def configure_ssh_key(user):
+ """
+ Configure ssh rsa key on local or remote
+
+ If <home_dir>/.ssh/id_rsa not exist, generate a new one
+ Add <home_dir>/.ssh/id_rsa.pub to <home_dir>/.ssh/authorized_keys anyway, make sure itself authorized
+ """
+ change_user_shell(user)
+ shell = sh.LocalShell()
+ key_file_manager = ssh_key.KeyFileManager(sh.ClusterShellAdaptorForLocalShell(shell))
+ authorized_key_manager = ssh_key.AuthorizedKeyManager(sh.SSHShell(shell, None))
+ is_generated, keys = key_file_manager.ensure_key_pair_exists_for_user(None, user)
+ if is_generated:
+ logger.info("A new ssh keypair is generated for user %s.", user)
+ authorized_key_manager.add(None, user, keys[0])
+
+
+def generate_ssh_key_pair_on_remote(
+ local_sudoer: str,
+ remote_host: str, remote_sudoer: str,
+ remote_user: str
+) -> str:
+ """generate a key pair on remote and return the public key"""
+ shell = sh.LocalShell()
+ # pass cmd through stdin rather than as arguments. It seems sudo has its own argument parsing mechanics,
+ # which breaks shell expansion used in cmd
+ cmd = '''
+[ -f ~/.ssh/id_rsa ] || ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -C "Cluster internal on $(hostname)" -N ''
+[ -f ~/.ssh/id_rsa.pub ] || ssh-keygen -y -f ~/.ssh/id_rsa > ~/.ssh/id_rsa.pub
+'''
+ result = shell.su_subprocess_run(
+ local_sudoer,
+ 'ssh {} {}@{} sudo -H -u {} /bin/sh'.format(constants.SSH_OPTION, remote_sudoer, remote_host, remote_user),
+ input=cmd.encode('utf-8'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+ if result.returncode != 0:
+ raise ValueError(codecs.decode(result.stdout, 'utf-8', 'replace'))
+
+ cmd = 'cat ~/.ssh/id_rsa.pub'
+ result = shell.su_subprocess_run(
+ local_sudoer,
+ 'ssh {} {}@{} sudo -H -u {} /bin/sh'.format(constants.SSH_OPTION, remote_sudoer, remote_host, remote_user),
+ input=cmd.encode('utf-8'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ if result.returncode != 0:
+ raise ValueError(codecs.decode(result.stderr, 'utf-8', 'replace'))
+ return result.stdout.decode('utf-8').strip()
+
+
+def init_ssh_remote():
+ """
+ Called by ha-cluster-join
+ """
+ user = userdir.get_sudoer()
+ if user is None:
+ user = userdir.getuser()
+ _, _, authorized_keys_file = key_files(user).values()
+ if not os.path.exists(authorized_keys_file):
+ open(authorized_keys_file, 'w').close()
+ with open(authorized_keys_file, "r+") as authkeys:
+ authkeys_data = authkeys.read()
+ dirname = os.path.dirname(authorized_keys_file)
+ for key in ("id_rsa", "id_dsa", "id_ecdsa", "id_ed25519"):
+ fn = os.path.join(dirname, key)
+ if not os.path.exists(fn):
+ continue
+ with open(fn + ".pub") as f:
+ keydata = f.read()
+ if keydata not in authkeys_data:
+ append(fn + ".pub", authorized_keys_file)
+
+
+def export_ssh_key_non_interactive(local_user_to_export, remote_user_to_swap, remote_node, local_sudoer, remote_sudoer):
+ """Copy ssh key from local to remote's authorized_keys. Require a configured non-interactive ssh authentication."""
+ # ssh-copy-id will prompt for the password of the destination user;
+ # this is unwanted, so we write to the authorized_keys file ourselves
+ # cmd = "ssh-copy-id -i ~{}/.ssh/id_rsa.pub {}@{}".format(local_user_to_export, remote_user_to_swap, remote_node)
+ with open(os.path.expanduser('~{}/.ssh/id_rsa.pub'.format(local_user_to_export)), 'r', encoding='utf-8') as f:
+ public_key = f.read()
+ # FIXME: prevent duplicated entries in authorized_keys
+ cmd = '''mkdir -p ~{user}/.ssh && chown {user} ~{user}/.ssh && chmod 0700 ~{user}/.ssh && cat >> ~{user}/.ssh/authorized_keys << "EOF"
+{key}
+EOF
+'''.format(user=remote_user_to_swap, key=public_key)
+ result = sh.LocalShell().su_subprocess_run(
+ local_sudoer,
+ 'ssh {} {}@{} sudo /bin/sh'.format(constants.SSH_OPTION, remote_sudoer, remote_node),
+ input=cmd.encode('utf-8'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+ if result.returncode != 0:
+ raise ValueError('Failed to export ssh public key of local user {} to {}@{}: {}'.format(
+ local_user_to_export, remote_user_to_swap, remote_node, result.stdout,
+ ))
+
+
+def import_ssh_key(local_user, remote_user, local_sudoer, remote_node, remote_sudoer):
+ "Copy ssh key from remote to local authorized_keys"
+ remote_key_content = remote_public_key_from(remote_user, local_sudoer, remote_node, remote_sudoer)
+ _, _, local_authorized_file = key_files(local_user).values()
+ if not utils.check_text_included(remote_key_content, local_authorized_file, remote=None):
+ sh.LocalShell().get_stdout_or_raise_error(
+ local_user,
+ "sed -i '$a {}' '{}'".format(remote_key_content, local_authorized_file),
+ )
+
+
+def append_to_remote_file(fromfile, user, remote_node, tofile):
+ """
+ Append content of fromfile to tofile on remote_node
+ """
+ cmd = "cat {} | ssh {} {}@{} 'cat >> {}'".format(fromfile, SSH_OPTION, user, remote_node, tofile)
+ sh.cluster_shell().get_stdout_or_raise_error(cmd)
+
+
+def init_csync2():
+ host_list = _context.node_list_in_cluster
+
+ logger.info("Configuring csync2")
+ if os.path.exists(CSYNC2_KEY):
+ if not confirm("csync2 is already configured - overwrite?"):
+ return
+
+ invoke("rm", "-f", CSYNC2_KEY)
+ logger.debug("Generating csync2 shared key")
+ if not invokerc("csync2", "-k", CSYNC2_KEY):
+ utils.fatal("Can't create csync2 key {}".format(CSYNC2_KEY))
+
+ csync2_file_list = ""
+ for f in FILES_TO_SYNC:
+ csync2_file_list += "include {};\n".format(f)
+
+ host_str = ""
+ for host in host_list:
+ host_str += 'host {};\n'.format(host)
+
+ utils.str2file("""group ha_group
+{{
+key /etc/csync2/key_hagroup;
+{}
+{}
+}}
+ """.format(host_str, csync2_file_list), CSYNC2_CFG)
+
+ if _context.skip_csync2:
+ for f in [CSYNC2_CFG, CSYNC2_KEY]:
+ sync_file(f)
+
+ service_manager = ServiceManager()
+ for host in host_list:
+ logger.info("Starting {} service on {}".format(CSYNC2_SERVICE, host))
+ service_manager.start_service(CSYNC2_SERVICE, enable=True, remote_addr=host)
+
+ _msg = "syncing" if _context.skip_csync2 else "checking"
+ with logger_utils.status_long("csync2 {} files".format(_msg)):
+ if _context.skip_csync2:
+ csync2_update("/")
+ else:
+ invoke("csync2", "-cr", "/")
+
+
+def csync2_update(path):
+ '''
+ Sync path to all peers
+
+ If there was a conflict, use '-f' to force this side to win
+ '''
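+ # Manual equivalent (illustrative), e.g. for /etc/csync2/csync2.cfg:
+ # csync2 -rm /etc/csync2/csync2.cfg -> mark as dirty
+ # csync2 -rxv /etc/csync2/csync2.cfg -> try to sync
+ # csync2 -rf /etc/csync2/csync2.cfg -> on conflict, force the local side to win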
+ invoke("csync2 -rm {}".format(path))
+ if invokerc("csync2 -rxv {}".format(path)):
+ return
+ invoke("csync2 -rf {}".format(path))
+ if not invokerc("csync2 -rxv {}".format(path)):
+ logger.warning("{} was not synced".format(path))
+
+
+def init_csync2_remote():
+ """
+ It would be nice if we could just have csync2.cfg include a directory,
+ which in turn included one file per node which would be referenced via
+ something like "group ha_group { ... config: /etc/csync2/hosts/*; }"
+ That way, adding a new node would just mean adding a single new file
+ to that directory. Unfortunately, the 'config' statement only allows
+ inclusion of specific individual files, not multiple files via wildcard.
+ So we have this function which is called by ha-cluster-join to add the new
+ remote node to csync2 config on some existing node. It is intentionally
+ not documented in ha-cluster-init's user-visible usage information.
+ """
+ if not _context.cluster_node:
+ utils.fatal("Hostname not specified")
+ user, newhost = _parse_user_at_host(_context.cluster_node, _context.current_user)
+
+ with open(CSYNC2_CFG) as f:
+ curr_cfg = f.read()
+
+ was_quiet = _context.quiet
+ try:
+ _context.quiet = True
+ # if host doesn't already exist in csync2 config, add it
+ if not re.search(r"^\s*host.*\s+%s\s*;" % (newhost), curr_cfg, flags=re.M):
+ curr_cfg = re.sub(r"\bhost.*\s+\S+\s*;", r"\g<0>\n\thost %s;" % (utils.doublequote(newhost)), curr_cfg, count=1)
+ utils.str2file(curr_cfg, CSYNC2_CFG)
+ else:
+ logger_utils.log_only_to_file(": Not updating %s - remote host %s already exists" % (CSYNC2_CFG, newhost))
+ finally:
+ _context.quiet = was_quiet
+
+
+def init_corosync_auth():
+ """
+ Generate the corosync authkey
+ """
+ if os.path.exists(COROSYNC_AUTH):
+ if not confirm("%s already exists - overwrite?" % (COROSYNC_AUTH)):
+ return
+ utils.rmfile(COROSYNC_AUTH)
+ invoke("corosync-keygen -l -k {}".format(COROSYNC_AUTH))
+
+
+def init_remote_auth():
+ """
+ Generate the pacemaker-remote authkey
+ """
+ if os.path.exists(PCMK_REMOTE_AUTH):
+ if not confirm("%s already exists - overwrite?" % (PCMK_REMOTE_AUTH)):
+ return
+ utils.rmfile(PCMK_REMOTE_AUTH)
+
+ pcmk_remote_dir = os.path.dirname(PCMK_REMOTE_AUTH)
+ utils.mkdirs_owned(pcmk_remote_dir, mode=0o750, gid="haclient")
+ if not invokerc("dd if=/dev/urandom of={} bs=4096 count=1".format(PCMK_REMOTE_AUTH)):
+ logger.warning("Failed to create pacemaker authkey: {}".format(PCMK_REMOTE_AUTH))
+ utils.chown(PCMK_REMOTE_AUTH, _context.current_user, "haclient")
+ utils.chmod(PCMK_REMOTE_AUTH, 0o640)
+
+
+class Validation(object):
+ """
+ Class to validate values from interactive inputs
+ """
+
+ def __init__(self, value, prev_value_list=[]):
+ """
+ Init function
+ """
+ self.value = value
+ self.prev_value_list = prev_value_list
+ if self.value in self.prev_value_list:
+ raise ValueError("Already in use: {}".format(self.value))
+
+ def _is_mcast_addr(self):
+ """
+ Check whether the address is a multicast address
+ """
+ if not utils.IP.is_mcast(self.value):
+ raise ValueError("{} is not a multicast address".format(self.value))
+
+ def _is_local_addr(self, local_addr_list):
+ """
+ Check whether the address is a local address
+ """
+ if self.value not in local_addr_list:
+ raise ValueError("Address must be a local address (one of {})".format(local_addr_list))
+
+ def _is_valid_port(self):
+ """
+ Check whether the port is valid
+ """
+ if self.prev_value_list and abs(int(self.value) - int(self.prev_value_list[0])) <= 1:
+ raise ValueError("Port {} is already in use by corosync. Leave a gap between multiple rings.".format(self.value))
+ if int(self.value) <= 1024 or int(self.value) > 65535:
+ raise ValueError("Valid port range should be 1025-65535")
+
+ @classmethod
+ def valid_mcast_address(cls, addr, prev_value_list=[]):
+ """
+ Check whether the address is already in use and whether the address is for multicast
+ """
+ cls_inst = cls(addr, prev_value_list)
+ cls_inst._is_mcast_addr()
+
+ @classmethod
+ def valid_ucast_ip(cls, addr, prev_value_list=[]):
+ """
+ Check whether the address is already in use and whether it exists locally
+ """
+ cls_inst = cls(addr, prev_value_list)
+ cls_inst._is_local_addr(_context.local_ip_list)
+
+ @classmethod
+ def valid_mcast_ip(cls, addr, prev_value_list=[]):
+ """
+ Check whether the address is already in use and whether it matches a local address or network
+ """
+ cls_inst = cls(addr, prev_value_list)
+ cls_inst._is_local_addr(_context.local_ip_list + _context.local_network_list)
+
+ @classmethod
+ def valid_port(cls, port, prev_value_list=[]):
+ """
+ Check whether the port is valid
+ """
+ cls_inst = cls(port, prev_value_list)
+ cls_inst._is_valid_port()
+
+ @staticmethod
+ def valid_admin_ip(addr, prev_value_list=[]):
+ """
+ Validate admin IP address
+ """
+ ipv6 = utils.IP.is_ipv6(addr)
+
+ # Check whether this IP already configured in cluster
+ ping_cmd = "ping6" if ipv6 else "ping"
+ if invokerc("{} -c 1 {}".format(ping_cmd, addr)):
+ raise ValueError("Address already in use: {}".format(addr))
+
+
+def init_corosync_unicast():
+
+ if _context.yes_to_all:
+ logger.info("Configuring corosync (unicast)")
+ else:
+ logger.info("""Configure Corosync (unicast):
+ This will configure the cluster messaging layer. You will need
+ to specify a network address over which to communicate (default
+ is {}'s network, but you can use the network address of any
+ active interface).
+""".format(_context.default_nic_list[0]))
+
+ ringXaddr_res = []
+ mcastport_res = []
+ default_ports = ["5405", "5407"]
+ two_rings = False
+
+ for i in range(2):
+ ringXaddr = prompt_for_string(
+ 'Address for ring{}'.format(i),
+ default=pick_default_value(_context.default_ip_list, ringXaddr_res),
+ valid_func=Validation.valid_ucast_ip,
+ prev_value=ringXaddr_res)
+ if not ringXaddr:
+ utils.fatal("No value for ring{}".format(i))
+ ringXaddr_res.append(ringXaddr)
+
+ mcastport = prompt_for_string(
+ 'Port for ring{}'.format(i),
+ match='[0-9]+',
+ default=pick_default_value(default_ports, mcastport_res),
+ valid_func=Validation.valid_port,
+ prev_value=mcastport_res)
+ if not mcastport:
+ utils.fatal("Expected a multicast port for ring{}".format(i))
+ mcastport_res.append(mcastport)
+
+ if i == 1 or \
+ not _context.second_heartbeat or \
+ not confirm("\nAdd another heartbeat line?"):
+ break
+ two_rings = True
+
+ corosync.create_configuration(
+ clustername=_context.cluster_name,
+ ringXaddr=ringXaddr_res,
+ mcastport=mcastport_res,
+ transport="udpu",
+ ipv6=_context.ipv6,
+ two_rings=two_rings)
+ sync_file(corosync.conf())
+
+
+def init_corosync_multicast():
+ def gen_mcastaddr():
+ if _context.ipv6:
+ return "ff3e::%s:%d" % (
+ ''.join([random.choice('0123456789abcdef') for _ in range(4)]),
+ random.randint(0, 9))
+ return "239.%d.%d.%d" % (
+ random.randint(0, 255),
+ random.randint(0, 255),
+ random.randint(1, 255))
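+ # e.g. gen_mcastaddr() may return "239.12.34.56" (IPv4)
+ # or "ff3e::1a2b:7" (IPv6); the digits are random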
+
+ if _context.yes_to_all:
+ logger.info("Configuring corosync")
+ else:
+ logger.info("""Configure Corosync:
+ This will configure the cluster messaging layer. You will need
+ to specify a network address over which to communicate (default
+ is {}'s network, but you can use the network address of any
+ active interface).
+""".format(_context.default_nic_list[0]))
+
+ bindnetaddr_res = []
+ mcastaddr_res = []
+ mcastport_res = []
+ default_ports = ["5405", "5407"]
+ two_rings = False
+
+ for i in range(2):
+ bindnetaddr = prompt_for_string(
+ 'IP or network address to bind to',
+ default=pick_default_value(_context.default_ip_list, bindnetaddr_res),
+ valid_func=Validation.valid_mcast_ip,
+ prev_value=bindnetaddr_res)
+ if not bindnetaddr:
+ utils.fatal("No value for bindnetaddr")
+ bindnetaddr_res.append(bindnetaddr)
+
+ mcastaddr = prompt_for_string(
+ 'Multicast address',
+ default=gen_mcastaddr(),
+ valid_func=Validation.valid_mcast_address,
+ prev_value=mcastaddr_res)
+ if not mcastaddr:
+ utils.fatal("No value for mcastaddr")
+ mcastaddr_res.append(mcastaddr)
+
+ mcastport = prompt_for_string(
+ 'Multicast port',
+ match='[0-9]+',
+ default=pick_default_value(default_ports, mcastport_res),
+ valid_func=Validation.valid_port,
+ prev_value=mcastport_res)
+ if not mcastport:
+ utils.fatal("No value for mcastport")
+ mcastport_res.append(mcastport)
+
+ if i == 1 or \
+ not _context.second_heartbeat or \
+ not confirm("\nConfigure a second multicast ring?"):
+ break
+ two_rings = True
+
+ nodeid = None
+ if _context.ipv6:
+ nodeid = utils.gen_nodeid_from_ipv6(_context.default_ip_list[0])
+
+ corosync.create_configuration(
+ clustername=_context.cluster_name,
+ bindnetaddr=bindnetaddr_res,
+ mcastaddr=mcastaddr_res,
+ mcastport=mcastport_res,
+ ipv6=_context.ipv6,
+ nodeid=nodeid,
+ two_rings=two_rings)
+ sync_file(corosync.conf())
+
+
+def adjust_corosync_parameters_according_to_profiles():
+ """
+ Adjust corosync parameters according to the configured profiles
+ """
+ if not _context.profiles_dict:
+ return
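+ # e.g. a profile entry {"corosync.totem.token": "5000"} (hypothetical value)
+ # results in corosync.set_value("totem.token", "5000")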
+ for k, v in _context.profiles_dict.items():
+ if k.startswith("corosync."):
+ corosync.set_value('.'.join(k.split('.')[1:]), v)
+
+
+def init_corosync():
+ """
+ Configure corosync (unicast or multicast, encrypted?)
+ """
+ init_corosync_auth()
+
+ if os.path.exists(corosync.conf()):
+ if not confirm("%s already exists - overwrite?" % (corosync.conf())):
+ return
+
+ if _context.unicast or _context.cloud_type or not _context.multicast:
+ init_corosync_unicast()
+ else:
+ init_corosync_multicast()
+ adjust_corosync_parameters_according_to_profiles()
+
+
+def init_sbd():
+ """
+ Configure SBD (Storage-based fencing).
+
+ SBD can also run in diskless mode if no device
+ is configured.
+ """
+ import crmsh.sbd
+ if _context.stage == "sbd":
+ crmsh.sbd.clean_up_existing_sbd_resource()
+ _context.sbd_manager.sbd_init()
+
+
+def init_upgradeutil():
+ upgradeutil.force_set_local_upgrade_seq()
+
+
+def init_ocfs2():
+ """
+ OCFS2 configure process
+ """
+ if not _context.ocfs2_devices:
+ return
+ ocfs2_manager = ocfs2.OCFS2Manager(_context)
+ ocfs2_manager.init_ocfs2()
+
+
+def init_cluster():
+ """
+ Initial cluster configuration.
+ """
+ init_cluster_local()
+
+ _rc, nnodes = ShellUtils().get_stdout("crm_node -l")
+ nnodes = len(nnodes.splitlines())
+ if nnodes < 1:
+ utils.fatal("No nodes found in cluster")
+ if nnodes > 1:
+ utils.fatal("Joined existing cluster - will not reconfigure.")
+
+ logger.info("Loading initial cluster configuration")
+
+ crm_configure_load("update", """property cib-bootstrap-options: stonith-enabled=false
+op_defaults op-options: timeout=600 record-pending=true
+rsc_defaults rsc-options: resource-stickiness=1 migration-threshold=3
+""")
+
+ _context.sbd_manager.configure_sbd_resource_and_properties()
+
+
+def init_admin():
+ # Skip this section when -y is passed
+ # unless $ADMIN_IP is set
+ adminaddr = _context.admin_ip
+ if _context.yes_to_all and not adminaddr:
+ return
+
+ if not adminaddr:
+ logger.info("""Configure Administration IP Address:
+ Optionally configure an administration virtual IP
+ address. The purpose of this IP address is to
+ provide a single IP that can be used to interact
+ with the cluster, rather than using the IP address
+ of any specific cluster node.
+""")
+ if not confirm("Do you wish to configure a virtual IP address?"):
+ return
+
+ adminaddr = prompt_for_string('Virtual IP', valid_func=Validation.valid_admin_ip)
+
+ crm_configure_load("update", 'primitive admin-ip IPaddr2 ip=%s op monitor interval=10 timeout=20' % (utils.doublequote(adminaddr)))
+ wait_for_resource("Configuring virtual IP ({})".format(adminaddr), "admin-ip")
+
+
+def configure_qdevice_interactive():
+ """
+ Configure qdevice on interactive mode
+ """
+ if _context.yes_to_all:
+ return
+ logger.info("Configure Qdevice/Qnetd:\n" + QDEVICE_HELP_INFO + "\n")
+ if not confirm("Do you want to configure QDevice?"):
+ return
+ while True:
+ try:
+ qdevice.QDevice.check_package_installed("corosync-qdevice")
+ break
+ except ValueError as err:
+ logger.error(err)
+ if confirm("Please install the package manually and press 'y' to continue"):
+ continue
+ else:
+ return
+
+ qnetd_addr = prompt_for_string("HOST or IP of the QNetd server to be used",
+ valid_func=qdevice.QDevice.check_qnetd_addr)
+ qdevice_port = prompt_for_string("TCP PORT of QNetd server", default=5403,
+ valid_func=qdevice.QDevice.check_qdevice_port)
+ qdevice_algo = prompt_for_string("QNetd decision ALGORITHM (ffsplit/lms)", default="ffsplit",
+ valid_func=qdevice.QDevice.check_qdevice_algo)
+ qdevice_tie_breaker = prompt_for_string("QNetd TIE_BREAKER (lowest/highest/valid node id)", default="lowest",
+ valid_func=qdevice.QDevice.check_qdevice_tie_breaker)
+ qdevice_tls = prompt_for_string("Whether using TLS on QDevice/QNetd (on/off/required)", default="on",
+ valid_func=qdevice.QDevice.check_qdevice_tls)
+ qdevice_heuristics = prompt_for_string("Heuristics COMMAND to run with absolute path; For multiple commands, use \";\" to separate",
+ valid_func=qdevice.QDevice.check_qdevice_heuristics,
+ allow_empty=True)
+ qdevice_heuristics_mode = prompt_for_string("MODE of operation of heuristics (on/sync/off)", default="sync",
+ valid_func=qdevice.QDevice.check_qdevice_heuristics_mode) if qdevice_heuristics else None
+
+ parts = qnetd_addr.split('@', 2)
+ if len(parts) == 2:
+ ssh_user = parts[0]
+ qnetd_host = parts[1]
+ else:
+ ssh_user = None
+ qnetd_host = qnetd_addr
+
+ _context.qdevice_inst = qdevice.QDevice(
+ qnetd_host,
+ port=qdevice_port,
+ algo=qdevice_algo,
+ tie_breaker=qdevice_tie_breaker,
+ tls=qdevice_tls,
+ ssh_user=ssh_user,
+ cmds=qdevice_heuristics,
+ mode=qdevice_heuristics_mode,
+ is_stage=_context.stage == "qdevice")
+
+
+def init_qdevice():
+ """
+ Setup qdevice and qnetd service
+ """
+ if not _context.qdevice_inst:
+ configure_qdevice_interactive()
+ # If the user declined to configure qdevice, return
+ if not _context.qdevice_inst:
+ ServiceManager().disable_service("corosync-qdevice.service")
+ return
+ logger.info("""Configure Qdevice/Qnetd:""")
+ cluster_node_list = utils.list_cluster_nodes()
+ for node in cluster_node_list:
+ if not ServiceManager().service_is_available("corosync-qdevice.service", node):
+ utils.fatal("corosync-qdevice.service is not available on {}".format(node))
+ qdevice_inst = _context.qdevice_inst
+ qnetd_addr = qdevice_inst.qnetd_addr
+ local_user = None
+ ssh_user = None
+ if qdevice_inst.ssh_user is not None:
+ # if the remote user is specified explicitly, use it
+ ssh_user = qdevice_inst.ssh_user
+ try:
+ local_user = UserOfHost.instance().user_of(utils.this_node())
+ except UserNotFoundError:
+ local_user = ssh_user
+ else:
+ try:
+ # if an ssh session is already available, use it
+ local_user, ssh_user = UserOfHost.instance().user_pair_for_ssh(qnetd_addr)
+ except UserNotFoundError:
+ pass
+ if ssh_user is None:
+ try:
+ local_user = UserOfHost.instance().user_of(utils.this_node())
+ except UserNotFoundError:
+ local_user = userdir.getuser()
+ ssh_user = local_user
+ # Configure passwordless ssh to qnetd if a password would otherwise be needed
+ if UserOfHost.instance().use_ssh_agent():
+ logger.info("Adding public keys to authorized_keys for user root...")
+ for key in ssh_key.AgentClient().list():
+ ssh_key.AuthorizedKeyManager(sh.SSHShell(
+ sh.LocalShell(additional_environ={'SSH_AUTH_SOCK': os.environ.get('SSH_AUTH_SOCK')}),
+ 'root',
+ )).add(qnetd_addr, ssh_user, key)
+ elif utils.check_ssh_passwd_need(local_user, ssh_user, qnetd_addr):
+ configure_ssh_key(local_user)
+ if 0 != utils.ssh_copy_id_no_raise(local_user, ssh_user, qnetd_addr):
+ msg = f"Failed to login to {ssh_user}@{qnetd_addr}. Please check the credentials."
+ sudoer = userdir.get_sudoer()
+ if sudoer and ssh_user != sudoer:
+ args = ['sudo crm']
+ args += [x for x in sys.argv[1:]]
+ for i, arg in enumerate(args):
+ if arg == '--qnetd-hostname' and i + 1 < len(args):
+ if '@' not in args[i + 1]:
+ args[i + 1] = f'{sudoer}@{qnetd_addr}'
+ msg += '\nOr, run "{}".'.format(' '.join(args))
+ raise ValueError(msg)
+ user_by_host = utils.HostUserConfig()
+ user_by_host.add(local_user, utils.this_node())
+ user_by_host.add(ssh_user, qnetd_addr)
+ user_by_host.save_remote(cluster_node_list)
+ # Start qdevice service if qdevice already configured
+ if utils.is_qdevice_configured() and not confirm("Qdevice is already configured - overwrite?"):
+ qdevice_inst.start_qdevice_service()
+ return
+ qdevice_inst.set_cluster_name()
+ # Validate qnetd node
+ qdevice_inst.valid_qnetd()
+
+ qdevice_inst.config_and_start_qdevice()
+
+ if _context.stage == "qdevice":
+ adjust_properties()
+
+
+def init():
+ """
+ Basic init
+ """
+ if _context.quiet:
+ logger_utils.disable_info_in_console()
+ log_start()
+ init_network()
+
+
+def join_ssh(seed_host, seed_user):
+ """
+ SSH configuration for joining node.
+ """
+ if not seed_host:
+ utils.fatal("No existing IP/hostname specified (use -c option)")
+ local_user = _context.current_user
+
+ if _context.use_ssh_agent:
+ try:
+ ssh_agent = ssh_key.AgentClient()
+ keys = ssh_agent.list()
+ logger.info("Using public keys from ssh-agent...")
+ except ssh_key.Error:
+ logger.error("Cannot get a public key from ssh-agent.")
+ raise
+ else:
+ keys = list()
+ return join_ssh_impl(local_user, seed_host, seed_user, keys)
+
+
+def join_ssh_impl(local_user, seed_host, seed_user, ssh_public_keys: typing.List[ssh_key.Key]):
+ ServiceManager(sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).start_service("sshd.service", enable=True)
+ if ssh_public_keys:
+ local_shell = sh.LocalShell(additional_environ={'SSH_AUTH_SOCK': os.environ.get('SSH_AUTH_SOCK')})
+ join_ssh_with_ssh_agent(local_shell, local_user, seed_host, seed_user, ssh_public_keys)
+ else:
+ local_shell = sh.LocalShell()
+ configure_ssh_key(local_user)
+ if 0 != utils.ssh_copy_id_no_raise(local_user, seed_user, seed_host):
+ msg = f"Failed to login to {seed_user}@{seed_host}. Please check the credentials."
+ sudoer = userdir.get_sudoer()
+ if sudoer and seed_user != sudoer:
+ args = ['sudo crm']
+ args += [x for x in sys.argv[1:]]
+ for i, arg in enumerate(args):
+ if arg in ('-c', '--cluster-node') and i + 1 < len(args):
+ if '@' not in args[i + 1]:
+ args[i + 1] = f'{sudoer}@{seed_host}'
+ msg += '\nOr, run "{}".'.format(' '.join(args))
+ raise ValueError(msg)
+ # After this, login to remote_node is passwordless
+ swap_public_ssh_key(seed_host, local_user, seed_user, local_user, seed_user, add=True)
+
+ # This makes sure the seed host has its own SSH keys in its own
+ # authorized_keys file (again, to help with the case where the
+ # user has done manual initial setup without the assistance of
+ # ha-cluster-init).
+ if not ssh_public_keys:
+ local_shell.get_stdout_or_raise_error(
+ local_user,
+ "ssh {} {}@{} sudo crm cluster init -i {} ssh_remote".format(
+ SSH_OPTION, seed_user, seed_host, _context.default_nic_list[0],
+ ),
+ )
+ user_by_host = utils.HostUserConfig()
+ user_by_host.add(seed_user, seed_host)
+ user_by_host.add(local_user, utils.this_node())
+ user_by_host.set_no_generating_ssh_key(bool(ssh_public_keys))
+ user_by_host.save_local()
+
+ configure_ssh_key('hacluster')
+ change_user_shell('hacluster')
+ swap_public_ssh_key_for_secondary_user(sh.cluster_shell(), seed_host, 'hacluster')
+
+
+def join_ssh_with_ssh_agent(
+ local_shell: sh.LocalShell,
+ local_user: str, seed_host: str, seed_user: str,
+ ssh_public_keys: typing.List[ssh_key.Key],
+):
+ # As ssh-agent is used, local_user has no effect here
+ shell = sh.SSHShell(local_shell, 'root')
+ authorized_key_manager = ssh_key.AuthorizedKeyManager(shell)
+ if not shell.can_run_as(seed_host, seed_user):
+ for key in ssh_public_keys:
+ authorized_key_manager.add(seed_host, seed_user, key)
+ if seed_user != 'root' and 0 != shell.subprocess_run_without_input(
+ seed_host, seed_user, 'sudo true',
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ ).returncode:
+ raise ValueError(f'Failed to sudo on {seed_user}@{seed_host}')
+ for key in ssh_public_keys:
+ authorized_key_manager.add(None, local_user, key)
+
+
+def swap_public_ssh_key_for_secondary_user(shell: sh.ClusterShell, host: str, user: str):
+ key_file_manager = ssh_key.KeyFileManager(shell)
+ local_key = ssh_key.KeyFile(key_file_manager.list_public_key_for_user(None, user)[0])
+ is_generated, remote_keys = key_file_manager.ensure_key_pair_exists_for_user(host, user)
+ if is_generated:
+ logger.info("A new ssh keypair is generated for user %s@%s.", user, host)
+ authorized_key_manager = ssh_key.AuthorizedKeyManager(shell)
+ authorized_key_manager.add(None, user, remote_keys[0])
+ authorized_key_manager.add(host, user, local_key)
+
+
+def swap_public_ssh_key(
+ remote_node,
+ local_user_to_swap,
+ remote_user_to_swap,
+ local_sudoer,
+ remote_sudoer,
+ add=False,
+):
+ """
+ Swap public ssh key between remote_node and local
+ """
+ # Detect whether a password is needed to log in to remote_node
+ if utils.check_ssh_passwd_need(local_user_to_swap, remote_user_to_swap, remote_node):
+ export_ssh_key_non_interactive(local_user_to_swap, remote_user_to_swap, remote_node, local_sudoer, remote_sudoer)
+
+ if add:
+ public_key = generate_ssh_key_pair_on_remote(local_sudoer, remote_node, remote_sudoer, remote_user_to_swap)
+ _, _, local_authorized_file = key_files(local_user_to_swap).values()
+ sh.LocalShell().get_stdout_or_raise_error(local_user_to_swap, "sed -i '$a {}' '{}'".format(public_key, local_authorized_file))
+ return public_key
+ else:
+ try:
+ import_ssh_key(local_user_to_swap, remote_user_to_swap, local_sudoer, remote_node, remote_sudoer)
+ except ValueError as e:
+ logger.warning(e)
+
+
+def remote_public_key_from(remote_user, local_sudoer, remote_node, remote_sudoer):
+ "Get the id_rsa.pub from the remote node"
+ cmd = 'cat ~/.ssh/id_rsa.pub'
+ result = sh.LocalShell().su_subprocess_run(
+ local_sudoer,
+ 'ssh {} {}@{} sudo -H -u {} /bin/sh'.format(constants.SSH_OPTION, remote_sudoer, remote_node, remote_user),
+ input=cmd.encode('utf-8'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ if result.returncode != 0:
+ utils.fatal("Can't get the remote id_rsa.pub from {}: {}".format(
+ remote_node,
+ codecs.decode(result.stderr, 'utf-8', 'replace'),
+ ))
+ return result.stdout.decode('utf-8')
+
+
+def join_csync2(seed_host, remote_user):
+ """
+ Csync2 configuration for joining node.
+ """
+ if not seed_host:
+ utils.fatal("No existing IP/hostname specified (use -c option)")
+
+ logger.info("Configuring csync2")
+ # Necessary if re-running join on a node that's been configured before.
+ utils.rmfile("/var/lib/csync2/{}.db3".format(utils.this_node()), ignore_errors=True)
+
+ # Not automatically updating /etc/hosts - risky in the general case.
+ # etc_hosts_add_me
+ # local hosts_line=$(etc_hosts_get_me)
+ # [ -n "$hosts_line" ] || error "No valid entry for $(hostname) in /etc/hosts - csync2 can't work"
+
+ # If we *were* updating /etc/hosts, the next line would have "\"$hosts_line\"" as
+ # the last arg (but this requires re-enabling this functionality in ha-cluster-init)
+ shell = sh.cluster_shell()
+ cmd = "crm cluster init -i {} csync2_remote {}".format(_context.default_nic_list[0], utils.this_node())
+ shell.get_stdout_or_raise_error(cmd, seed_host)
+
+ # This is necessary if syncing /etc/hosts (to ensure everyone's got the
+ # same list of hosts)
+ # local tmp_conf=/etc/hosts.$$
+ # invoke scp root@seed_host:/etc/hosts $tmp_conf \
+ # || error "Can't retrieve /etc/hosts from seed_host"
+ # install_tmp $tmp_conf /etc/hosts
+ utils.copy_remote_textfile(remote_user, seed_host, "/etc/csync2/csync2.cfg", "/etc/csync2")
+ utils.copy_remote_textfile(remote_user, seed_host, "/etc/csync2/key_hagroup", "/etc/csync2")
+
+ logger.info("Starting {} service".format(CSYNC2_SERVICE))
+ ServiceManager(shell).start_service(CSYNC2_SERVICE, enable=True)
+
+ # Sync new config out. This goes to all hosts; csync2.cfg definitely
+ # needs to go to all hosts (else hosts other than the seed and the
+ # joining host won't have the joining host in their config yet).
+ # Strictly, the rest of the files need only go to the new host which
+ # could theoretically be effected using `csync2 -xv -P $(hostname)`,
+ # but this still leaves all the other files in a dirty state (because
+ # they haven't gone to all nodes in the cluster), which means a
+ # subsequent join of another node can fail its sync of corosync.conf
+ # when it updates expected_votes. Grrr...
+ with logger_utils.status_long("csync2 syncing files in cluster"):
+ cmd = "sudo csync2 -rm /;sudo csync2 -rxv || sudo csync2 -rf / && sudo csync2 -rxv"
+ rc, _, stderr = shell.get_rc_stdout_stderr_without_input(seed_host, cmd)
+ if rc != 0:
+ print("")
+ logger.warning("csync2 run failed - some files may not be sync'd: %s", stderr)
+
+
+def join_ssh_merge(cluster_node, remote_user):
+ """
+ Ensure known_hosts is the same in all nodes
+ """
+ logger.info("Merging known_hosts")
+
+ hosts = _context.node_list_in_cluster
+
+ shell = sh.cluster_shell()
+ # create local entry in known_hosts
+ shell.ssh_to_localhost(None, 'true')
+
+ known_hosts_new = set()
+
+ cat_cmd = "[ -e ~/.ssh/known_hosts ] && cat ~/.ssh/known_hosts || true"
+ #logger_utils.log_only_to_file("parallax.call {} : {}".format(hosts, cat_cmd))
+ for host in hosts:
+ known_hosts_content = shell.get_stdout_or_raise_error(cat_cmd, host)
+ if known_hosts_content:
+ known_hosts_new.update((utils.to_ascii(known_hosts_content) or "").splitlines())
+
+ if known_hosts_new:
+ hoststxt = "\n".join(sorted(known_hosts_new))
+ #results = parallax.parallax_copy(hosts, tmpf, known_hosts_path, strict=False)
+ for host in hosts:
+ utils.write_remote_file(hoststxt, "~/.ssh/known_hosts", utils.user_of(host), host)
+
+
+def update_expected_votes():
+ # get a list of nodes, excluding remote nodes
+ nodelist = None
+ loop_count = 0
+ device_votes = 0
+ nodecount = 0
+ expected_votes = 0
+ while True:
+ rc, nodelist_text = ShellUtils().get_stdout("cibadmin -Ql --xpath '/cib/status/node_state'")
+ if rc == 0:
+ try:
+ nodelist_xml = etree.fromstring(nodelist_text)
+ nodelist = [n.get('uname') for n in nodelist_xml.xpath('//node_state') if n.get('remote_node') != 'true']
+ if len(nodelist) >= 2:
+ break
+ except Exception:
+ break
+ # timeout: 10 seconds
+ if loop_count == 10:
+ break
+ loop_count += 1
+ sleep(1)
+
+ # Increase expected_votes
+ # TODO: wait to adjust expected_votes until after cluster join,
+ # so that we can ask the cluster for the current membership list
+ # Have to check if a qnetd device is configured and increase
+ # expected_votes in that case
+ is_qdevice_configured = utils.is_qdevice_configured()
+ if nodelist is None:
+ for v in corosync.get_values("quorum.expected_votes"):
+ expected_votes = int(v)
+
+ # For node count >= 2, expected_votes = nodecount + device_votes.
+ # Assume nodecount is N; for ffsplit, qdevice has only one vote,
+ # i.e. device_votes is 1 and expected_votes = N + 1,
+ # while for lms, qdevice has N - 1 votes, i.e. expected_votes = N + (N - 1).
+ # device_votes is derived from quorum.device.net.algorithm below.
+
+ if corosync.get_value("quorum.device.net.algorithm") == "lms":
+ device_votes = int((expected_votes - 1) / 2)
+ nodecount = expected_votes - device_votes
+ # as nodecount will increase by 1 and device_votes is nodecount - 1,
+ # device_votes also increases by 1
+ device_votes += 1
+ elif corosync.get_value("quorum.device.net.algorithm") == "ffsplit":
+ device_votes = 1
+ nodecount = expected_votes - device_votes
+ elif is_qdevice_configured:
+ device_votes = 0
+ nodecount = expected_votes
+
+ nodecount += 1
+ expected_votes = nodecount + device_votes
+ corosync.set_value("quorum.expected_votes", str(expected_votes))
+ else:
+ nodecount = len(nodelist)
+ expected_votes = 0
+ # For node count >= 2, expected_votes = nodecount + device_votes.
+ # Assume nodecount is N; for ffsplit, qdevice has only one vote,
+ # i.e. device_votes is 1 and expected_votes = N + 1,
+ # while for lms, qdevice has N - 1 votes, i.e. expected_votes = N + (N - 1).
+ if corosync.get_value("quorum.device.net.algorithm") == "ffsplit":
+ device_votes = 1
+ if corosync.get_value("quorum.device.net.algorithm") == "lms":
+ device_votes = nodecount - 1
+
+ if nodecount > 1:
+ expected_votes = nodecount + device_votes
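+ # Worked example: joining a 3rd node to a cluster with an ffsplit qdevice
+ # gives nodecount=3, device_votes=1 -> expected_votes=4, quorum.two_node=0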
+
+ if corosync.get_value("quorum.expected_votes"):
+ corosync.set_value("quorum.expected_votes", str(expected_votes))
+ if is_qdevice_configured:
+ corosync.set_value("quorum.device.votes", device_votes)
+ corosync.set_value("quorum.two_node", 1 if expected_votes == 2 else 0)
+
+ sync_file(corosync.conf())
+
+
+def setup_passwordless_with_other_nodes(init_node, remote_user):
+ """
+ Set up passwordless ssh with the other cluster nodes
+
+ Fetch the node list from the init node, then swap keys with each of them
+ """
+ # Fetch cluster nodes list
+ local_user = _context.current_user
+ shell = sh.cluster_shell()
+ rc, out, err = shell.get_rc_stdout_stderr_without_input(init_node, 'crm_node -l')
+ if rc != 0:
+ utils.fatal("Can't fetch cluster nodes list from {}: {}".format(init_node, err))
+ cluster_nodes_list = []
+ for line in out.splitlines():
+ # Parse line in format: <id> <nodename> <state>, and collect the
+ # nodename.
+ tokens = line.split()
+ if len(tokens) == 0:
+ pass # Skip any spurious empty line.
+ elif len(tokens) < 3:
+ logger.warning("Unable to configure passwordless ssh with nodeid {}. The "
+ "node has no known name and/or state information".format(
+ tokens[0]))
+ elif tokens[2] != "member":
+ logger.warning("Skipping configuration of passwordless ssh with node {} in "
+ "state '{}'. The node is not a current member".format(
+ tokens[1], tokens[2]))
+ else:
+ cluster_nodes_list.append(tokens[1])
+ user_by_host = utils.HostUserConfig()
+ user_by_host.add(local_user, utils.this_node())
+ try:
+ user_list, host_list = _fetch_core_hosts(shell, init_node)
+ for user, host in zip(user_list, host_list):
+ user_by_host.add(user, host)
+ except ValueError:
+ # No core.hosts on the seed host, may be a cluster upgraded from previous version
+ pass
+ user_by_host.save_local()
+
+ # Filter out init node from cluster_nodes_list
+ rc, out, err = shell.get_rc_stdout_stderr_without_input(init_node, 'hostname')
+ if rc != 0:
+ utils.fatal("Can't fetch hostname of {}: {}".format(init_node, err))
+ # Swap ssh public key between join node and other cluster nodes
+ if not _context.use_ssh_agent:
+ for node in (node for node in cluster_nodes_list if node != out):
+ remote_user_to_swap = utils.user_of(node)
+ remote_privileged_user = remote_user_to_swap
+ utils.ssh_copy_id(local_user, remote_privileged_user, node)
+ swap_public_ssh_key(node, local_user, remote_user_to_swap, local_user, remote_privileged_user)
+ if local_user != 'hacluster':
+ change_user_shell('hacluster', node)
+ swap_public_ssh_key(node, 'hacluster', 'hacluster', local_user, remote_privileged_user, add=True)
+ if local_user != 'hacluster':
+ swap_key_for_hacluster(cluster_nodes_list)
+ else:
+ swap_key_for_hacluster(cluster_nodes_list)
+
+ user_by_host.save_remote(cluster_nodes_list)
+
+
+def swap_key_for_hacluster(other_node_list):
+ """
+ In some cases, an old cluster may not have passwordless ssh configured for hacluster.
+ The new joining node should check and swap public keys with the old cluster nodes.
+ """
+ shell = sh.cluster_shell()
+ key_file_manager = ssh_key.KeyFileManager(shell)
+ authorized_key_manager = ssh_key.AuthorizedKeyManager(shell)
+ keys: typing.List[ssh_key.Key] = [
+ key_file_manager.ensure_key_pair_exists_for_user(node, 'hacluster')[1][0]
+ for node in other_node_list
+ ]
+ keys.append(ssh_key.KeyFile(key_file_manager.list_public_key_for_user(None, 'hacluster')[0]))
+ for key in keys:
+ authorized_key_manager.add(None, 'hacluster', key)
+ for node in other_node_list:
+ for key in keys:
+ authorized_key_manager.add(node, 'hacluster', key)
+
+
+def sync_files_to_disk():
+ """
+ Sync file content to disk between cluster nodes
+ """
+ files_string = ' '.join(filter(lambda f: os.path.isfile(f), FILES_TO_SYNC))
+ if files_string:
+ utils.cluster_run_cmd("sync {}".format(files_string.strip()))
+
+
+def join_cluster(seed_host, remote_user):
+ """
+ Cluster configuration for joining node.
+ """
+ def get_local_nodeid():
+ # for IPv6
+ return utils.gen_nodeid_from_ipv6(_context.local_ip_list[0])
+
+ def update_nodeid(nodeid, node=None):
+ # for IPv6
+ if node and node != utils.this_node():
+ cmd = "crm corosync set totem.nodeid %d" % nodeid
+ invoke("crm cluster run '{}' {}".format(cmd, node))
+ else:
+ corosync.set_value("totem.nodeid", nodeid)
+
+ is_qdevice_configured = utils.is_qdevice_configured()
+ if is_qdevice_configured and not ServiceManager().service_is_available("corosync-qdevice.service"):
+ utils.fatal("corosync-qdevice.service is not available")
+
+ shutil.copy(corosync.conf(), COROSYNC_CONF_ORIG)
+
+ # check if use IPv6
+ ipv6_flag = False
+ ipv6 = corosync.get_value("totem.ip_version")
+ if ipv6 == "ipv6":
+ ipv6_flag = True
+ _context.ipv6 = ipv6_flag
+
+ init_network()
+
+ # check whether have two rings
+ rrp_flag = False
+ rrp = corosync.get_value("totem.rrp_mode")
+ if rrp in ('active', 'passive'):
+ rrp_flag = True
+
+ # It would be massively useful at this point if new nodes could come
+ # up in standby mode, so we could query the CIB locally to see if
+ # there was any further local setup that needed doing, e.g.: creating
+ # mountpoints for clustered filesystems. Unfortunately we don't have
+ # that yet, so the following crawling horror takes a punt on the seed
+ # node being up, then asks it for a list of mountpoints...
+ shell = sh.cluster_shell()
+ if seed_host:
+ _rc, outp, _ = shell.get_rc_stdout_stderr_without_input(seed_host, "cibadmin -Q --xpath \"//primitive\"")
+ if outp:
+ xml = etree.fromstring(outp)
+ mountpoints = xml.xpath(' and '.join(['//primitive[@class="ocf"',
+ '@provider="heartbeat"',
+ '@type="Filesystem"]']) +
+ '/instance_attributes/nvpair[@name="directory"]/@value')
+ for m in mountpoints:
+ invoke("mkdir -p {}".format(m))
+ else:
+ logger.info("No existing IP/hostname specified - skipping mountpoint detection/creation")
+
+ # Bump expected_votes in corosync.conf
+ # TODO(must): this is rather fragile (see related code in ha-cluster-remove)
+
+ # If corosync.conf() doesn't exist or is empty, we will fail here. (bsc#943227)
+ if not os.path.exists(corosync.conf()):
+ utils.fatal("{} is not readable. Please ensure that hostnames are resolvable.".format(corosync.conf()))
+
+ # if unicast, we need to add our node to corosync.conf()
+ is_unicast = corosync.is_unicast()
+ if is_unicast:
+ ringXaddr_res = []
+ for i in 0, 1:
+ ringXaddr = prompt_for_string(
+ 'Address for ring{}'.format(i),
+ default=pick_default_value(_context.default_ip_list, ringXaddr_res),
+ valid_func=Validation.valid_ucast_ip,
+ prev_value=ringXaddr_res)
+ # The ringXaddr here might still be empty in non-interactive mode,
+ # when there are no usable default IP addresses (_context.default_ip_list is empty or has only one entry)
+ if not ringXaddr:
+ utils.fatal("No value for ring{}".format(i))
+ ringXaddr_res.append(ringXaddr)
+ if not rrp_flag:
+ break
+ invoke("rm -f /var/lib/heartbeat/crm/* /var/lib/pacemaker/cib/*")
+ try:
+ corosync.add_node_ucast(ringXaddr_res)
+ except corosync.IPAlreadyConfiguredError as e:
+ logger.warning(e)
+ sync_file(corosync.conf())
+ shell.get_stdout_or_raise_error('sudo corosync-cfgtool -R', seed_host)
+
+ _context.sbd_manager.join_sbd(remote_user, seed_host)
+
+ if ipv6_flag and not is_unicast:
+ # for ipv6 mcast
+ # using ipv6 need nodeid configured
+ local_nodeid = get_local_nodeid()
+ update_nodeid(local_nodeid)
+
+ if is_qdevice_configured and not is_unicast:
+ # expected_votes here may be "0"; set it to "3" to make sure the cluster can start
+ corosync.set_value("quorum.expected_votes", "3")
+
+ # Initialize the cluster before adjusting quorum. This is so
+ # that we can query the cluster to find out how many nodes
+ # there are (so as not to adjust multiple times if a previous
+ # attempt to join the cluster failed)
+ init_cluster_local()
+
+ adjust_properties()
+
+ with logger_utils.status_long("Reloading cluster configuration"):
+
+ if ipv6_flag and not is_unicast:
+ # for ipv6 mcast
+ nodeid_dict = {}
+ _rc, outp, _ = ShellUtils().get_stdout_stderr("crm_node -l")
+ if _rc == 0:
+ for line in outp.splitlines():
+ tokens = line.split()
+ if len(tokens) == 0:
+ pass # Skip any spurious empty line.
+ elif len(tokens) < 3:
+ logger.warning("Unable to update configuration for nodeid {}. "
+ "The node has no known name and/or state "
+ "information".format(tokens[0]))
+ else:
+ nodeid_dict[tokens[1]] = tokens[0]
+
+ # apply nodelist in cluster
+ if is_unicast or is_qdevice_configured:
+ invoke("crm cluster run 'crm corosync reload'")
+
+ update_expected_votes()
+ # Trigger corosync config reload to ensure expected_votes is propagated
+ invoke("corosync-cfgtool -R")
+
+ # Ditch no-quorum-policy=ignore
+ _rc, outp = ShellUtils().get_stdout("crm configure show")
+ if re.search('no-quorum-policy=.*ignore', outp):
+ invoke("crm_attribute --attr-name no-quorum-policy --delete-attr")
+
+ # if unicast, we need to reload the corosync configuration
+ # on the other nodes
+ if is_unicast:
+ invoke("crm cluster run 'crm corosync reload'")
+
+ if ipv6_flag and not is_unicast:
+ # for ipv6 mcast
+ # after csync2_update, all config files are the same,
+ # but nodeid must be unique
+ for node in list(nodeid_dict.keys()):
+ if node == utils.this_node():
+ continue
+ update_nodeid(int(nodeid_dict[node]), node)
+ update_nodeid(local_nodeid)
+
+ sync_files_to_disk()
+
+ if is_qdevice_configured:
+ start_qdevice_on_join_node(seed_host)
+ else:
+ ServiceManager(sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).disable_service("corosync-qdevice.service")
+
+
+def adjust_priority_in_rsc_defaults(is_2node_wo_qdevice):
+ """
+ Adjust priority in rsc_defaults
+
+ Set priority=1 when the current cluster has two nodes and no qdevice;
+ otherwise set priority=0
+ """
+ if is_2node_wo_qdevice:
+ utils.set_property("priority", 1, property_type="rsc_defaults", conditional=True)
+ else:
+ utils.set_property("priority", 0, property_type="rsc_defaults")
+
+
+def adjust_priority_fencing_delay(is_2node_wo_qdevice):
+ """
+ Adjust priority-fencing-delay
+
+ When pcmk_delay_max is set in a fence agent
+ and the current cluster has two nodes and no qdevice,
+ set priority-fencing-delay=2*pcmk_delay_max
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("crm configure show related:stonith")
+ if not out:
+ return
+ pcmk_delay_max_v_list = re.findall(r"pcmk_delay_max=(\w+)", out)
+ if pcmk_delay_max_v_list:
+ max_value = max([int(utils.crm_msec(v)/1000) for v in pcmk_delay_max_v_list])
+ if pcmk_delay_max_v_list and is_2node_wo_qdevice:
+ utils.set_property("priority-fencing-delay", 2*max_value, conditional=True)
+ else:
+ utils.set_property("priority-fencing-delay", 0)
+
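+ # Worked example (values assumed, not from a real cluster): with two stonith
+ # resources carrying pcmk_delay_max=15s and pcmk_delay_max=30s, the largest
+ # value is 30 seconds, so a two-node cluster without qdevice gets
+ # priority-fencing-delay=60; any other topology resets it to 0.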
+
+def start_qdevice_on_join_node(seed_host):
+ """
+ Run the qdevice certificate process and start the qdevice service on the join node
+ """
+ with logger_utils.status_long("Starting corosync-qdevice.service"):
+ if not corosync.is_unicast():
+ corosync.add_nodelist_from_cmaptool()
+ sync_file(corosync.conf())
+ invoke("crm corosync reload")
+ if utils.is_qdevice_tls_on():
+ qnetd_addr = corosync.get_value("quorum.device.net.host")
+ qdevice_inst = qdevice.QDevice(qnetd_addr, cluster_node=seed_host)
+ qdevice_inst.certificate_process_on_join()
+ ServiceManager(sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).start_service("corosync-qdevice.service", enable=True)
+
+
+def get_cluster_node_ip(node: str) -> str:
+ """
+ ringX_addr might be a hostname or an IP address;
+ _context.cluster_node by now is always a hostname.
+
+ If ring0_addr is an IP address, get the list of IP addresses belonging to _context.cluster_node,
+ then find the one configured as ring0_addr.
+ Finally, return that IP address; the caller removes it from the nodelist later.
+ """
+ addr_list = corosync.get_values('nodelist.node.ring0_addr')
+ if node in addr_list:
+ return
+
+ ip_list = utils.get_iplist_from_name(node)
+ for ip in ip_list:
+ if ip in addr_list:
+ return ip
+
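+ # Example (addresses assumed): with nodelist.node.ring0_addr containing
+ # ["10.0.0.1", "10.0.0.2"] and "alice" resolving to
+ # ["10.0.0.2", "192.168.122.2"], this returns "10.0.0.2". If ring0_addr
+ # already lists the hostname "alice" itself, it returns None.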
+
+def stop_services(stop_list, remote_addr=None):
+ """
+ Stop cluster-related services
+ """
+ service_manager = ServiceManager()
+ for service in stop_list:
+ if service_manager.service_is_active(service, remote_addr=remote_addr):
+ logger.info("Stopping the %s%s", service, " on {}".format(remote_addr) if remote_addr else "")
+ service_manager.stop_service(service, disable=True, remote_addr=remote_addr)
+
+
+def rm_configuration_files(remote=None):
+ """
+ Delete configuration files from the node to be removed
+ """
+ shell = sh.cluster_shell()
+ shell.get_stdout_or_raise_error("rm -f {}".format(' '.join(_context.rm_list)), remote)
+ # restore original sbd configuration file from /usr/share/fillup-templates/sysconfig.sbd
+ if utils.package_is_installed("sbd", remote_addr=remote):
+ from .sbd import SBDManager
+ cmd = "cp {} {}".format(SBDManager.SYSCONFIG_SBD_TEMPLATE, SYSCONFIG_SBD)
+ shell.get_stdout_or_raise_error(cmd, remote)
+
+
+def remove_node_from_cluster(node):
+ """
+ Remove node from running cluster and the corosync / pacemaker configuration.
+ """
+ node_ip = get_cluster_node_ip(node)
+ stop_services(SERVICES_STOP_LIST, remote_addr=node)
+ qdevice.QDevice.remove_qdevice_db([node])
+ rm_configuration_files(node)
+
+ # execute the command: crm node delete $HOSTNAME
+ logger.info("Removing the node {}".format(node))
+ if not NodeMgmt.call_delnode(node):
+ utils.fatal("Failed to remove {}.".format(node))
+
+ if not invokerc("sed -i /{}/d {}".format(node, CSYNC2_CFG)):
+ utils.fatal("Removing the node {} from {} failed".format(node, CSYNC2_CFG))
+
+ # Remove node from nodelist
+ if corosync.get_values("nodelist.node.ring0_addr"):
+ corosync.del_node(node_ip if node_ip is not None else node)
+
+ decrease_expected_votes()
+
+ adjust_properties()
+
+ logger.info("Propagating configuration changes across the remaining nodes")
+ sync_file(CSYNC2_CFG)
+ sync_file(corosync.conf())
+
+ # Trigger corosync config reload to ensure expected_votes is propagated
+ invoke("corosync-cfgtool -R")
+
+
+def decrease_expected_votes():
+ '''
+ Decrement expected_votes in corosync.conf
+ '''
+ vote = corosync.get_value("quorum.expected_votes")
+ if not vote:
+ return
+ quorum = int(vote)
+ new_quorum = quorum - 1
+ if utils.is_qdevice_configured():
+ new_nodecount = 0
+ device_votes = 0
+ nodecount = 0
+
+ if corosync.get_value("quorum.device.net.algorithm") == "lms":
+ nodecount = int((quorum + 1)/2)
+ new_nodecount = nodecount - 1
+ device_votes = new_nodecount - 1
+
+ elif corosync.get_value("quorum.device.net.algorithm") == "ffsplit":
+ device_votes = 1
+ nodecount = quorum - device_votes
+ new_nodecount = nodecount - 1
+
+ if new_nodecount > 1:
+ new_quorum = new_nodecount + device_votes
+ else:
+ new_quorum = 0
+
+ corosync.set_value("quorum.device.votes", device_votes)
+ else:
+ corosync.set_value("quorum.two_node", 1 if new_quorum == 2 else 0)
+ corosync.set_value("quorum.expected_votes", str(new_quorum))
+
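+ # Illustrative arithmetic (cluster sizes assumed): with the "lms" algorithm
+ # and three nodes, expected_votes is 5, so nodecount = (5+1)/2 = 3; removing
+ # one node gives new_nodecount = 2, device_votes = 1 and new expected_votes 3.
+ # With "ffsplit" the qdevice always has one vote, so three nodes mean
+ # expected_votes = 4 and removing one node also yields expected_votes = 3.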
+
+def bootstrap_init(context):
+ """
+ Init cluster process
+ """
+ global _context
+ _context = context
+
+ init()
+
+ stage = _context.stage
+ if stage is None:
+ stage = ""
+
+ # vgfs stage requires a running cluster; everything else requires an inactive cluster,
+ # except ssh and csync2 (which don't care) and csync2_remote (which mustn't care,
+ # just in case this breaks ha-cluster-join on another node).
+ if stage in ("vgfs", "admin", "qdevice", "ocfs2"):
+ if not _context.cluster_is_running:
+ utils.fatal("Cluster is inactive - can't run %s stage" % (stage))
+ elif stage == "":
+ if _context.cluster_is_running:
+ utils.fatal("Cluster is currently active - can't run")
+ elif stage not in ("ssh", "ssh_remote", "csync2", "csync2_remote", "sbd", "ocfs2"):
+ if _context.cluster_is_running:
+ utils.fatal("Cluster is currently active - can't run %s stage" % (stage))
+
+ _context.load_profiles()
+ _context.init_sbd_manager()
+
+ # Need hostname resolution to work, want NTP (but don't block ssh_remote or csync2_remote)
+ if stage not in ('ssh_remote', 'csync2_remote'):
+ check_tty()
+ if not check_prereqs(stage):
+ return
+ elif stage == 'csync2_remote':
+ args = _context.args
+ logger_utils.log_only_to_file("args: {}".format(args))
+ if len(args) != 2:
+ utils.fatal("Expected NODE argument to csync2_remote")
+ _context.cluster_node = args[1]
+
+ if stage and _context.cluster_is_running and \
+ not ServiceManager(shell=sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).service_is_active(CSYNC2_SERVICE):
+ _context.skip_csync2 = True
+ _context.node_list_in_cluster = utils.list_cluster_nodes()
+ elif not _context.cluster_is_running:
+ _context.node_list_in_cluster = [utils.this_node()]
+
+ if stage != "":
+ globals()["init_" + stage]()
+ else:
+ init_ssh()
+ if _context.skip_csync2:
+ ServiceManager().stop_service(CSYNC2_SERVICE, disable=True)
+ else:
+ init_csync2()
+ init_corosync()
+ init_remote_auth()
+ init_sbd()
+ init_upgradeutil()
+
+ lock_inst = lock.Lock()
+ try:
+ with lock_inst.lock():
+ init_cluster()
+ init_admin()
+ init_qdevice()
+ init_ocfs2()
+ except lock.ClaimLockError as err:
+ utils.fatal(err)
+
+ bootstrap_finished()
+
+
+def bootstrap_add(context):
+ """
+ Add the given nodes to the cluster.
+ """
+ if not context.user_at_node_list:
+ return
+
+ global _context
+ _context = context
+
+ options = ""
+ for nic in _context.nic_list:
+ options += '-i {} '.format(nic)
+ options = " {}".format(options.strip()) if options else ""
+
+ if context.use_ssh_agent:
+ options += ' --use-ssh-agent'
+
+ shell = sh.ClusterShell(sh.LocalShell(), UserOfHost.instance(), _context.use_ssh_agent)
+ for (user, node) in (_parse_user_at_host(x, _context.current_user) for x in _context.user_at_node_list):
+ print()
+ logger.info("Adding node {} to cluster".format(node))
+ cmd = 'crm cluster join -y {} -c {}@{}'.format(options, _context.current_user, utils.this_node())
+ logger.info("Running command on {}: {}".format(node, cmd))
+ out = shell.get_stdout_or_raise_error(cmd, node)
+ print(out)
+
+
+def bootstrap_join(context):
+ """
+ Join cluster process
+ """
+ global _context
+ _context = context
+
+ init()
+ _context.init_sbd_manager()
+
+ check_tty()
+
+ corosync_active = ServiceManager(sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).service_is_active("corosync.service")
+ if corosync_active and _context.stage != "ssh":
+ utils.fatal("Abort: Cluster is currently active. Run this command on a node joining the cluster.")
+
+ if not check_prereqs("join"):
+ return
+
+ if _context.stage != "":
+ remote_user, cluster_node = _parse_user_at_host(_context.cluster_node, _context.current_user)
+ globals()["join_" + _context.stage](cluster_node, remote_user)
+ else:
+ if not _context.yes_to_all and _context.cluster_node is None:
+ logger.info("""Join This Node to Cluster:
+ You will be asked for the IP address of an existing node, from which
+ configuration will be copied. If you have not already configured
+ passwordless ssh between nodes, you will be prompted for the root
+ password of the existing node.
+""")
+ # TODO: prompt for user@host
+ cluster_user_at_node = prompt_for_string("IP address or hostname of existing node (e.g.: 192.168.1.1)", ".+")
+ _context.cluster_node = cluster_user_at_node
+ _context.initialize_user()
+
+ init_upgradeutil()
+ remote_user, cluster_node = _parse_user_at_host(_context.cluster_node, _context.current_user)
+ utils.ping_node(cluster_node)
+
+ join_ssh(cluster_node, remote_user)
+ remote_user = utils.user_of(cluster_node)
+
+ service_manager = ServiceManager()
+ n = 0
+ while n < REJOIN_COUNT:
+ if service_manager.service_is_active("pacemaker.service", cluster_node):
+ break
+ n += 1
+ logger.warning("Cluster is inactive on %s. Retry in %d seconds", cluster_node, REJOIN_INTERVAL)
+ sleep(REJOIN_INTERVAL)
+ else:
+ utils.fatal("Cluster is inactive on {}".format(cluster_node))
+
+ lock_inst = lock.RemoteLock(cluster_node)
+ try:
+ with lock_inst.lock():
+ _context.node_list_in_cluster = utils.fetch_cluster_node_list_from_node(cluster_node)
+ setup_passwordless_with_other_nodes(cluster_node, remote_user)
+ join_remote_auth(cluster_node, remote_user)
+ _context.skip_csync2 = not service_manager.service_is_active(CSYNC2_SERVICE, cluster_node)
+ if _context.skip_csync2:
+ service_manager.stop_service(CSYNC2_SERVICE, disable=True)
+ retrieve_all_config_files(cluster_node)
+ logger.warning("csync2 is not initiated yet. Before using csync2 for the first time, please run \"crm cluster init csync2 -y\" on any one node. Note, this may take a while.")
+ else:
+ join_csync2(cluster_node, remote_user)
+ join_ssh_merge(cluster_node, remote_user)
+ probe_partitions()
+ join_ocfs2(cluster_node, remote_user)
+ join_cluster(cluster_node, remote_user)
+ except (lock.SSHError, lock.ClaimLockError) as err:
+ utils.fatal(err)
+
+ bootstrap_finished()
+
+
+def bootstrap_finished():
+ logger.info("Done (log saved to %s)" % (log.CRMSH_LOG_FILE))
+
+
+def join_ocfs2(peer_host, peer_user):
+ """
+ If the init node configured an OCFS2 device, verify that device on the join node
+ """
+ ocfs2_inst = ocfs2.OCFS2Manager(_context)
+ ocfs2_inst.join_ocfs2(peer_host)
+
+
+def join_remote_auth(node, user):
+ if os.path.exists(PCMK_REMOTE_AUTH):
+ utils.rmfile(PCMK_REMOTE_AUTH)
+ pcmk_remote_dir = os.path.dirname(PCMK_REMOTE_AUTH)
+ utils.mkdirs_owned(pcmk_remote_dir, mode=0o750, gid="haclient")
+ utils.touch(PCMK_REMOTE_AUTH)
+
+
+def remove_qdevice():
+ """
+ Remove qdevice service and configuration from cluster
+ """
+ if not utils.is_qdevice_configured():
+ utils.fatal("No QDevice configuration in this cluster")
+ if not confirm("Removing QDevice service and configuration from cluster: Are you sure?"):
+ return
+
+ utils.check_all_nodes_reachable()
+ qdevice_reload_policy = qdevice.evaluate_qdevice_quorum_effect(qdevice.QDEVICE_REMOVE)
+
+ logger.info("Disable corosync-qdevice.service")
+ invoke("crm cluster run 'systemctl disable corosync-qdevice'")
+ if qdevice_reload_policy == qdevice.QdevicePolicy.QDEVICE_RELOAD:
+ logger.info("Stopping corosync-qdevice.service")
+ invoke("crm cluster run 'systemctl stop corosync-qdevice'")
+
+ with logger_utils.status_long("Removing QDevice configuration from cluster"):
+ qdevice.QDevice.remove_certification_files_on_qnetd()
+ qdevice.QDevice.remove_qdevice_config()
+ qdevice.QDevice.remove_qdevice_db()
+ update_expected_votes()
+ if qdevice_reload_policy == qdevice.QdevicePolicy.QDEVICE_RELOAD:
+ invoke("crm cluster run 'crm corosync reload'")
+ elif qdevice_reload_policy == qdevice.QdevicePolicy.QDEVICE_RESTART:
+ logger.info("Restarting cluster service")
+ utils.cluster_run_cmd("crm cluster restart")
+ wait_for_cluster()
+ else:
+ logger.warning("To remove qdevice service, need to restart cluster service manually on each node")
+
+ adjust_properties()
+
+
+def bootstrap_remove(context):
+ """
+ Remove node from cluster, or remove qdevice configuration
+ """
+ global _context
+ _context = context
+ force_flag = config.core.force or _context.force
+
+ init()
+
+ service_manager = ServiceManager()
+ if not service_manager.service_is_active("corosync.service"):
+ utils.fatal("Cluster is not active - can't execute removing action")
+
+ if _context.qdevice_rm_flag and _context.cluster_node:
+ utils.fatal("Either remove node or qdevice")
+
+ _context.skip_csync2 = not service_manager.service_is_active(CSYNC2_SERVICE)
+ if _context.skip_csync2:
+ _context.node_list_in_cluster = utils.fetch_cluster_node_list_from_node(utils.this_node())
+
+ if _context.qdevice_rm_flag:
+ remove_qdevice()
+ return
+
+ if not _context.yes_to_all and _context.cluster_node is None:
+ logger.info("""Remove This Node from Cluster:
+ You will be asked for the IP address or name of an existing node,
+ which will be removed from the cluster. This command must be
+ executed from a different node in the cluster.
+""")
+ _context.cluster_node = prompt_for_string("IP address or hostname of cluster node (e.g.: 192.168.1.1)", ".+")
+ _context.initialize_user()
+
+ if not _context.cluster_node:
+ utils.fatal("No existing IP/hostname specified (use -c option)")
+
+ remote_user, cluster_node = _parse_user_at_host(_context.cluster_node, _context.current_user)
+ cluster_node = get_node_canonical_hostname(cluster_node)
+
+ if not force_flag and not confirm("Removing node \"{}\" from the cluster: Are you sure?".format(cluster_node)):
+ return
+
+ if cluster_node == utils.this_node():
+ if not force_flag:
+ utils.fatal("Removing self requires --force")
+ remove_self(force_flag)
+ elif cluster_node in xmlutil.listnodes():
+ remove_node_from_cluster(cluster_node)
+ else:
+ utils.fatal("Specified node {} is not configured in cluster! Unable to remove.".format(cluster_node))
+
+ # In case any crm command can re-generate upgrade_seq again
+ sh.cluster_shell().get_stdout_or_raise_error("rm -rf /var/lib/crmsh", cluster_node)
+ bootstrap_finished()
+
+
+def remove_self(force_flag=False):
+ me = utils.this_node()
+ yes_to_all = _context.yes_to_all
+ nodes = xmlutil.listnodes(include_remote_nodes=False)
+ othernode = next((x for x in nodes if x != me), None)
+ if othernode is not None:
+ # remove from other node
+ cmd = "crm{} cluster remove{} -c {}".format(" -F" if force_flag else "", " -y" if yes_to_all else "", me)
+ rc, _, _ = sh.cluster_shell().get_rc_stdout_stderr_without_input(othernode, cmd)
+ if rc != 0:
+ utils.fatal("Failed to remove this node from {}".format(othernode))
+ else:
+ # disable and stop cluster
+ stop_services(SERVICES_STOP_LIST)
+ qdevice.QDevice.remove_certification_files_on_qnetd()
+ qdevice.QDevice.remove_qdevice_db([utils.this_node()])
+ rm_configuration_files()
+
+
+def init_common_geo():
+ """
+ Tasks to do both on first and other geo nodes.
+ """
+ if not utils.package_is_installed("booth"):
+ utils.fatal("Booth not installed - Not configurable as a geo cluster node.")
+
+
+def init_csync2_geo():
+ """
+ TODO: Configure csync2 for geo cluster
+ That is, create a second sync group which
+ syncs the geo configuration across the whole
+ geo cluster.
+ """
+
+
+def create_booth_authkey():
+ logger.info("Create authentication key for booth")
+ if os.path.exists(BOOTH_AUTH):
+ utils.rmfile(BOOTH_AUTH)
+ rc, _, err = invoke("booth-keygen {}".format(BOOTH_AUTH))
+ if not rc:
+ utils.fatal("Failed to generate booth authkey: {}".format(err))
+
+
+def create_booth_config(arbitrator, clusters, tickets):
+ logger.info("Configure booth")
+
+ config_template = """# The booth configuration file is "/etc/booth/booth.conf". You need to
+# prepare the same booth configuration file on each arbitrator and
+# each node in the cluster sites where the booth daemon can be launched.
+
+# "transport" means which transport layer booth daemon will use.
+# Currently only "UDP" is supported.
+transport="UDP"
+port="9929"
+"""
+ cfg = [config_template]
+ if arbitrator is not None:
+ cfg.append("arbitrator=\"{}\"".format(arbitrator))
+ for s in clusters.values():
+ cfg.append("site=\"{}\"".format(s))
+ cfg.append("authfile=\"{}\"".format(BOOTH_AUTH))
+ for t in tickets:
+ cfg.append("ticket=\"{}\"\nexpire=\"600\"".format(t))
+ cfg = "\n".join(cfg) + "\n"
+
+ if os.path.exists(BOOTH_CFG):
+ utils.rmfile(BOOTH_CFG)
+ utils.str2file(cfg, BOOTH_CFG)
+ utils.chown(BOOTH_CFG, "hacluster", "haclient")
+ os.chmod(BOOTH_CFG, 0o644)
+
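+ # Sketch of the generated file (example values): for
+ # clusters={"A": "10.0.0.10", "B": "10.0.0.20"}, tickets=["ticket-A"] and an
+ # arbitrator at 10.0.0.30, the lines appended after the template header are:
+ #   arbitrator="10.0.0.30"
+ #   site="10.0.0.10"
+ #   site="10.0.0.20"
+ #   authfile="<BOOTH_AUTH>"
+ #   ticket="ticket-A"
+ #   expire="600"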
+
+def bootstrap_init_geo(context):
+ """
+ Configure as a geo cluster member.
+ """
+ global _context
+ _context = context
+
+ if os.path.exists(BOOTH_CFG) and not confirm("This will overwrite {} - continue?".format(BOOTH_CFG)):
+ return
+ if os.path.exists(BOOTH_AUTH) and not confirm("This will overwrite {} - continue?".format(BOOTH_AUTH)):
+ return
+
+ init_common_geo()
+
+ # TODO:
+ # in /etc/drbd.conf or /etc/drbd.d/global_common.conf
+ # set common.startup.wfc-timeout 100
+ # set common.startup.degr-wfc-timeout 120
+
+ create_booth_authkey()
+ create_booth_config(_context.arbitrator, _context.clusters, _context.tickets)
+ logger.info("Sync booth configuration across cluster")
+ csync2_update(BOOTH_DIR)
+ init_csync2_geo()
+ geo_cib_config(_context.clusters)
+
+
+def geo_fetch_config(node):
+ cmd = "tar -c -C '{}' .".format(BOOTH_DIR)
+ with tempfile.TemporaryDirectory() as tmpdir:
+ pipe_outlet, pipe_inlet = os.pipe()
+ try:
+ child = subprocess.Popen(['tar', '-x', '-C', tmpdir], stdin=pipe_outlet, stderr=subprocess.DEVNULL)
+ except Exception:
+ os.close(pipe_inlet)
+ raise
+ finally:
+ os.close(pipe_outlet)
+ try:
+ result = sh.cluster_shell().subprocess_run_without_input(node, None, cmd, stdout=pipe_inlet, stderr=subprocess.PIPE)
+ finally:
+ os.close(pipe_inlet)
+ rc = child.wait()
+ if result.returncode != 0:
+ utils.fatal("Failed to create ssh connection to {}: {}".format(node, result.stderr))
+ if rc != 0:
+ raise ValueError("Problem encountered with booth configuration from {}.".format(node))
+ try:
+ if os.path.isfile("%s/authkey" % (tmpdir)):
+ invoke("mv %s/authkey %s" % (tmpdir, BOOTH_AUTH))
+ os.chmod(BOOTH_AUTH, 0o600)
+ if os.path.isfile("%s/booth.conf" % (tmpdir)):
+ invoke("mv %s/booth.conf %s" % (tmpdir, BOOTH_CFG))
+ os.chmod(BOOTH_CFG, 0o644)
+ except OSError as err:
+ raise ValueError("Problem encountered with booth configuration from {}: {}".format(node, err))
+
+
+def _select_user_pair_for_ssh_for_secondary_components(dest: str):
+ """Select a user pair for operating secondary components, e.g. qdevice and geo cluster arbitor"""
+ user, node = utils.parse_user_at_host(dest)
+ if user is not None:
+ try:
+ local_user = utils.user_of(utils.this_node())
+ except UserNotFoundError:
+ local_user = user
+ remote_user = user
+ else:
+ try:
+ local_user, remote_user = UserOfHost.instance().user_pair_for_ssh(node)
+ except UserNotFoundError:
+ try:
+ local_user = utils.user_of(utils.this_node())
+ except UserNotFoundError:
+ local_user = userdir.getuser()
+ remote_user = local_user
+ return local_user, remote_user, node
+
+
+def geo_cib_config(clusters):
+ cluster_name = corosync.get_values('totem.cluster_name')[0]
+ if cluster_name not in list(clusters.keys()):
+ utils.fatal("Local cluster name is {}, expected {}".format(cluster_name, "|".join(list(clusters.keys()))))
+
+ logger.info("Configure cluster resources for booth")
+ crm_template = Template("""
+primitive booth-ip ocf:heartbeat:IPaddr2 $iprules
+primitive booth-site ocf:pacemaker:booth-site \
+ meta resource-stickiness="INFINITY" \
+ params config=booth op monitor interval="10s"
+group g-booth booth-ip booth-site meta target-role=Stopped
+""")
+ iprule = 'params rule #cluster-name eq {} ip="{}"'
+
+ crm_configure_load("update", crm_template.substitute(iprules=" ".join(iprule.format(k, v) for k, v in clusters.items())))
+
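+ # With clusters={"A": "10.0.0.10", "B": "10.0.0.20"} (example values), the
+ # substituted iprules become:
+ #   params rule #cluster-name eq A ip="10.0.0.10" params rule #cluster-name eq B ip="10.0.0.20"
+ # i.e. booth-ip is started with the address that matches the local cluster name.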
+
+def bootstrap_join_geo(context):
+ """
+ Run on second cluster to add to a geo configuration.
+ It fetches its booth configuration from the other node (cluster node or arbitrator).
+ """
+ global _context
+ _context = context
+ init_common_geo()
+ check_tty()
+ user, node = utils.parse_user_at_host(_context.cluster_node)
+ if not sh.cluster_shell().can_run_as(node, 'root'):
+ local_user, remote_user, node = _select_user_pair_for_ssh_for_secondary_components(_context.cluster_node)
+ if context.use_ssh_agent:
+ try:
+ ssh_agent = ssh_key.AgentClient()
+ keys = ssh_agent.list()
+ logger.info("Using public keys from ssh-agent...")
+ except ssh_key.Error:
+ logger.error("Cannot get a public key from ssh-agent.")
+ raise
+ local_shell = sh.LocalShell(additional_environ={'SSH_AUTH_SOCK': os.environ.get('SSH_AUTH_SOCK')})
+ join_ssh_with_ssh_agent(local_shell, local_user, node, remote_user, keys)
+ else:
+ configure_ssh_key(local_user)
+ if 0 != utils.ssh_copy_id_no_raise(local_user, remote_user, node):
+ raise ValueError(f"Failed to login to {remote_user}@{node}. Please check the credentials.")
+ swap_public_ssh_key(node, local_user, remote_user, local_user, remote_user, add=True)
+ user_by_host = utils.HostUserConfig()
+ user_by_host.add(local_user, utils.this_node())
+ user_by_host.add(remote_user, node)
+ user_by_host.set_no_generating_ssh_key(context.use_ssh_agent)
+ user_by_host.save_local()
+ geo_fetch_config(node)
+ logger.info("Sync booth configuration across cluster")
+ csync2_update(BOOTH_DIR)
+ geo_cib_config(_context.clusters)
+
+
+def bootstrap_arbitrator(context):
+ """
+ Configure this machine as an arbitrator.
+ It fetches its booth configuration from a cluster node already in the cluster.
+ """
+ global _context
+ _context = context
+
+ init_common_geo()
+ check_tty()
+ user, node = utils.parse_user_at_host(_context.cluster_node)
+ if not sh.cluster_shell().can_run_as(node, 'root'):
+ local_user, remote_user, node = _select_user_pair_for_ssh_for_secondary_components(_context.cluster_node)
+ if context.use_ssh_agent:
+ try:
+ ssh_agent = ssh_key.AgentClient()
+ keys = ssh_agent.list()
+ logger.info("Using public keys from ssh-agent...")
+ except ssh_key.Error:
+ logger.error("Cannot get a public key from ssh-agent.")
+ raise
+ local_shell = sh.LocalShell(additional_environ={'SSH_AUTH_SOCK': os.environ.get('SSH_AUTH_SOCK')})
+ join_ssh_with_ssh_agent(local_shell, local_user, node, remote_user, keys)
+ else:
+ configure_ssh_key(local_user)
+ if 0 != utils.ssh_copy_id_no_raise(local_user, remote_user, node):
+ raise ValueError(f"Failed to login to {remote_user}@{node}. Please check the credentials.")
+ swap_public_ssh_key(node, local_user, remote_user, local_user, remote_user, add=True)
+ user_by_host = utils.HostUserConfig()
+ user_by_host.add(local_user, utils.this_node())
+ user_by_host.add(remote_user, node)
+ user_by_host.set_no_generating_ssh_key(context.use_ssh_agent)
+ user_by_host.save_local()
+ geo_fetch_config(node)
+ if not os.path.isfile(BOOTH_CFG):
+ utils.fatal("Failed to copy {} from {}".format(BOOTH_CFG, _context.cluster_node))
+ # TODO: verify that the arbitrator IP in the configuration is us?
+ logger.info("Enabling and starting the booth arbitrator service")
+ ServiceManager(sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).start_service("booth@booth", enable=True)
+
+
+def get_stonith_timeout_generally_expected():
+ """
+ Adjust stonith-timeout for all scenarios; the formula is:
+
+ stonith-timeout = STONITH_TIMEOUT_DEFAULT + token + consensus
+ """
+ stonith_enabled = utils.get_property("stonith-enabled")
+ # When stonith is disabled, return None
+ if utils.is_boolean_false(stonith_enabled):
+ return None
+
+ return STONITH_TIMEOUT_DEFAULT + corosync.token_and_consensus_timeout()
+
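+ # Illustrative arithmetic (values assumed): with a STONITH_TIMEOUT_DEFAULT of
+ # 60 seconds and corosync token=5s plus consensus=6s, the expected
+ # stonith-timeout would be 60 + 11 = 71 seconds.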
+
+def adjust_pcmk_delay_max(is_2node_wo_qdevice):
+ """
+ For each fence agent,
+ add the pcmk_delay_max parameter when the cluster is a two-node cluster without qdevice;
+ otherwise remove pcmk_delay_max
+ """
+ cib_factory.refresh()
+
+ shell = sh.cluster_shell()
+ if is_2node_wo_qdevice:
+ for res in cib_factory.fence_id_list_without_pcmk_delay():
+ cmd = "crm resource param {} set pcmk_delay_max {}s".format(res, PCMK_DELAY_MAX)
+ shell.get_stdout_or_raise_error(cmd)
+ logger.debug("Add parameter 'pcmk_delay_max={}s' for resource '{}'".format(PCMK_DELAY_MAX, res))
+ else:
+ for res in cib_factory.fence_id_list_with_pcmk_delay():
+ cmd = "crm resource param {} delete pcmk_delay_max".format(res)
+ shell.get_stdout_or_raise_error(cmd)
+ logger.debug("Delete parameter 'pcmk_delay_max' for resource '{}'".format(res))
+
+
+def adjust_stonith_timeout():
+ """
+ Adjust stonith-timeout for sbd and other scenarios
+ """
+ if ServiceManager().service_is_active("sbd.service"):
+ from .sbd import SBDTimeout
+ SBDTimeout.adjust_sbd_timeout_related_cluster_configuration()
+ else:
+ value = get_stonith_timeout_generally_expected()
+ if value:
+ utils.set_property("stonith-timeout", value, conditional=True)
+
+
+def adjust_properties():
+ """
+ Adjust properties for the cluster:
+ - pcmk_delay_max
+ - stonith-timeout
+ - priority in rsc_defaults
+ - priority-fencing-delay
+
+ Call it when:
+ - node join/remove
+ - add qdevice via stage
+ - remove qdevice
+ - add sbd via stage
+ """
+ if not ServiceManager().service_is_active("pacemaker.service"):
+ return
+ is_2node_wo_qdevice = utils.is_2node_cluster_without_qdevice()
+ adjust_pcmk_delay_max(is_2node_wo_qdevice)
+ adjust_stonith_timeout()
+ adjust_priority_in_rsc_defaults(is_2node_wo_qdevice)
+ adjust_priority_fencing_delay(is_2node_wo_qdevice)
+
+
+def retrieve_all_config_files(cluster_node):
+ """
+ Retrieve config files from cluster_node if they exist
+ """
+ with logger_utils.status_long("Retrieve all config files"):
+ cmd = 'cpio -o << EOF\n{}\nEOF\n'.format(
+ '\n'.join((f for f in FILES_TO_SYNC if f != CSYNC2_KEY and f != CSYNC2_CFG))
+ )
+ pipe_outlet, pipe_inlet = os.pipe()
+ try:
+ child = subprocess.Popen(['cpio', '-iu'], stdin=pipe_outlet, stderr=subprocess.DEVNULL)
+ except Exception:
+ os.close(pipe_inlet)
+ raise
+ finally:
+ os.close(pipe_outlet)
+ try:
+ result = sh.cluster_shell().subprocess_run_without_input(cluster_node, None, cmd, stdout=pipe_inlet, stderr=subprocess.DEVNULL)
+ finally:
+ os.close(pipe_inlet)
+ rc = child.wait()
+ # Some errors may happen here, since not all files in FILES_TO_SYNC may exist.
+ if result is None or result.returncode == 255:
+ utils.fatal("Failed to create ssh connect to {}".format(cluster_node))
+ if rc != 0:
+ utils.fatal("Failed to retrieve config files from {}".format(cluster_node))
+
+
+def sync_file(path):
+ """
+ Sync a file across the cluster nodes
+ """
+ if _context.skip_csync2:
+ utils.cluster_copy_file(path, nodes=_context.node_list_in_cluster, output=False)
+ else:
+ csync2_update(path)
+# EOF
diff --git a/crmsh/cache.py b/crmsh/cache.py
new file mode 100644
index 0000000..98b5390
--- /dev/null
+++ b/crmsh/cache.py
@@ -0,0 +1,47 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2018 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+#
+# Cache stuff. A naive implementation.
+# Used by ra.py to cache named lists of things.
+
+import time
+
+
+_max_cache_age = 600.0 # seconds
+_stamp = time.time()
+_lists = {}
+
+
+def _clear():
+ "Clear the cache."
+ global _stamp
+ global _lists
+ _stamp = time.time()
+ _lists = {}
+
+
+def is_cached(name):
+ "True if the argument exists in the cache."
+ return retrieve(name) is not None
+
+
+def store(name, lst):
+ """
+ Stores the given list for the given name.
+ Returns the given list.
+ """
+ _lists[name] = lst
+ return lst
+
+
+def retrieve(name):
+ """
+ Returns the cached list for name, or None.
+ """
+ if time.time() - _stamp > _max_cache_age:
+ _clear()
+ return _lists.get(name)
+
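+ # Minimal usage sketch (the list name is illustrative):
+ #
+ #   from crmsh import cache
+ #   if not cache.is_cached("ocf_providers"):
+ #       cache.store("ocf_providers", ["heartbeat", "pacemaker"])
+ #   providers = cache.retrieve("ocf_providers")  # None again once _max_cache_age passes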
+
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/cibconfig.py b/crmsh/cibconfig.py
new file mode 100644
index 0000000..8c81fec
--- /dev/null
+++ b/crmsh/cibconfig.py
@@ -0,0 +1,4045 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+import copy
+import os
+import sys
+import re
+import fnmatch
+import time
+import collections
+from lxml import etree
+from . import config
+from . import options
+from . import constants
+from . import tmpfiles
+from . import clidisplay
+from . import idmgmt
+from . import schema
+from . import utils
+from . import cibverify
+from . import parse
+from . import ordereddict
+from . import orderedset
+from . import cibstatus
+from . import crm_gv
+from . import ui_utils
+from . import userdir
+from .ra import get_ra, get_properties_list, get_pe_meta, get_properties_meta, RAInfo
+from .utils import ext_cmd, safe_open_w, pipe_string, safe_close_w, crm_msec
+from .utils import ask, lines2cli, olist
+from .utils import page_string, str2tmp, ensure_sudo_readable
+from .utils import run_ptest, is_id_valid, edit_file, get_boolean, filter_string
+from .xmlutil import is_child_rsc, rsc_constraint, sanitize_cib, rename_id, get_interesting_nodes
+from .xmlutil import is_pref_location, get_topnode, new_cib, get_rscop_defaults_meta_node
+from .xmlutil import rename_rscref, is_ms_or_promotable_clone, silly_constraint, is_container, fix_comments
+from .xmlutil import sanity_check_nvpairs, merge_nodes, op2list, mk_rsc_type, is_resource
+from .xmlutil import stuff_comments, is_comment, is_constraint, read_cib, processing_sort_cli
+from .xmlutil import find_operation, get_rsc_children_ids, is_primitive, referenced_resources
+from .xmlutil import cibdump2elem, processing_sort, get_rsc_ref_ids, merge_tmpl_into_prim
+from .xmlutil import remove_id_used_attributes, get_top_cib_nodes
+from .xmlutil import merge_attributes, is_cib_element, sanity_check_meta
+from .xmlutil import is_simpleconstraint, is_template, rmnode, is_defaults, is_live_cib
+from .xmlutil import get_rsc_operations, delete_rscref, xml_equals, lookup_node, RscState
+from .xmlutil import text2elem, is_related, check_id_ref, xml_tostring
+from .xmlutil import sanitize_cib_for_patching, is_attr_set, get_set_nodes, set_attr
+from .cliformat import get_score, nvpairs2list, abs_pos_score, cli_acl_roleref, nvpair_format
+from .cliformat import cli_nvpair, cli_acl_rule, rsc_set_constraint, get_kind, head_id_format
+from .cliformat import simple_rsc_constraint, cli_rule, cli_format
+from .cliformat import cli_acl_role, cli_acl_permission, cli_path
+from . import log
+
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+
+
+def show_unrecognized_elems(cib_elem):
+ try:
+ conf = cib_elem.findall("configuration")[0]
+ except IndexError:
+ logger.warning("CIB has no configuration element")
+ return False
+ rc = True
+ for topnode in conf.iterchildren():
+ if is_defaults(topnode) or topnode.tag == "fencing-topology":
+ continue
+ for c in topnode.iterchildren():
+ if c.tag not in cib_object_map:
+ logger.warning("unrecognized CIB element %s", c.tag)
+ rc = False
+ return rc
+
+
+#
+# object sets (enables operations on sets of elements)
+#
+def mkset_obj(*args):
+ if not cib_factory.is_cib_sane():
+ raise ValueError("CIB is not valid")
+ if args and args[0] == "xml":
+ return CibObjectSetRaw(*args[1:])
+ return CibObjectSetCli(*args)
+
+
+def set_graph_attrs(gv_obj, obj_type):
+ try:
+ for attr, attr_v in constants.graph['*'].items():
+ gv_obj.new_graph_attr(attr, attr_v)
+ except KeyError:
+ pass
+ try:
+ for attr, attr_v in constants.graph[obj_type].items():
+ gv_obj.new_graph_attr(attr, attr_v)
+ except KeyError:
+ pass
+
+
+def set_obj_attrs(gv_obj, obj_id, obj_type):
+ try:
+ for attr, attr_v in constants.graph['*'].items():
+ gv_obj.new_attr(obj_id, attr, attr_v)
+ except KeyError:
+ pass
+ try:
+ for attr, attr_v in constants.graph[obj_type].items():
+ gv_obj.new_attr(obj_id, attr, attr_v)
+ except KeyError:
+ pass
+
+
+def set_edge_attrs(gv_obj, edge_id, obj_type):
+ try:
+ for attr, attr_v in constants.graph[obj_type].items():
+ gv_obj.new_edge_attr(edge_id, attr, attr_v)
+ except KeyError:
+ pass
+
+
+def fill_nvpairs(name, node, attrs, id_hint):
+ '''
+ Fill the container node with attrs:
+ name: name of container
+ node: container Element
+ attrs: dict containing values
+ id_hint: used to generate unique ids for nvpairs
+ '''
+ subpfx = constants.subpfx_list.get(name, '')
+ subpfx = "%s_%s" % (id_hint, subpfx) if subpfx else id_hint
+ nvpair_pfx = node.get("id") or subpfx
+ for n, v in attrs.items():
+ nvpair = etree.SubElement(node, "nvpair", name=n)
+ if v is not None:
+ nvpair.set("value", v)
+ idmgmt.set_id(nvpair, None, nvpair_pfx)
+ return node
+
+
+def mkxmlnvpairs(name, attrs, id_hint):
+ '''
+ name: Name of the element.
+ attrs: dict containing a set of nvpairs.
+ id_hint: Used to generate ids.
+
+ Example: instance_attributes, {name: value...}, <hint>
+
+ Notes:
+
+ Other tags not containing nvpairs are fine if the dict is empty.
+
+ cluster_property_set and defaults have nvpairs as direct children.
+ In that case, use the id_hint directly as id.
+ This is important in case there are multiple sets.
+
+ '''
+ xml_node_type = "meta_attributes" if name in constants.defaults_tags else name
+ node = etree.Element(xml_node_type)
+ notops = name != "operations"
+
+ if (name == "cluster_property_set" or name in constants.defaults_tags) and id_hint:
+ node.set("id", id_hint)
+ id_ref = attrs.get("$id-ref")
+ if id_ref:
+ id_ref_2 = cib_factory.resolve_id_ref(name, id_ref)
+ node.set("id-ref", id_ref_2)
+ if notops:
+ return node # id_ref is the only attribute (if not operations)
+ if '$id-ref' in attrs:
+ del attrs['$id-ref']
+ v = attrs.get('$id')
+ if v:
+ node.set("id", v)
+ del attrs['$id']
+ elif name in constants.nvset_cli_names:
+ node.set("id", id_hint)
+ else:
+ # operations don't need an id
+ idmgmt.set_id(node, None, id_hint, id_required=notops)
+ return fill_nvpairs(name, node, attrs, id_hint)
+
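+ # Example of the XML produced (ids are illustrative; the exact suffixes come
+ # from idmgmt): mkxmlnvpairs("instance_attributes", {"ip": "10.0.0.10"}, "vip")
+ # returns roughly:
+ #   <instance_attributes id="vip-instance_attributes">
+ #     <nvpair id="vip-instance_attributes-ip" name="ip" value="10.0.0.10"/>
+ #   </instance_attributes>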
+
+def copy_nvpair(nvpairs, nvp, id_hint=None):
+ """
+ Copies the given nvpair into the given tag containing nvpairs
+ """
+ logger.debug("copy_nvpair: %s", xml_tostring(nvp))
+ if 'value' not in nvp.attrib:
+ nvpairs.append(copy.deepcopy(nvp))
+ return
+ n = nvp.get('name')
+ if id_hint is None:
+ id_hint = n
+ for nvp2 in nvpairs:
+ if nvp2.get('name') == n:
+ nvp2.set('value', nvp.get('value'))
+ break
+ else:
+ m = copy.deepcopy(nvp)
+ nvpairs.append(m)
+ if 'id' not in m.attrib:
+ m.set('id', idmgmt.new(m, id_hint))
+
+
+def copy_nvpairs(tonode, fromnode):
+ """
+ copy nvpairs from fromnode to tonode.
+ things to copy can be nvpairs, comments or rules.
+ """
+ def copy_comment(cnode):
+ for nvp2 in tonode:
+ if is_comment(nvp2) and nvp2.text == cnode.text:
+ break # no need to copy
+ else:
+ tonode.append(copy.deepcopy(cnode))
+
+ def copy_id(node):
+ nid = node.get('id')
+ for nvp2 in tonode:
+ if nvp2.get('id') == nid:
+ tonode.replace(nvp2, copy.deepcopy(node))
+ break
+ else:
+ tonode.append(copy.deepcopy(node))
+
+ logger.debug("copy_nvpairs: %s -> %s", xml_tostring(fromnode), xml_tostring(tonode))
+ id_hint = tonode.get('id')
+ for c in fromnode:
+ if is_comment(c):
+ copy_comment(c)
+ elif c.tag == "nvpair":
+ copy_nvpair(tonode, c, id_hint=id_hint)
+ elif 'id' in c.attrib: # ok, it has an id, we can work with this
+ copy_id(c)
+ else: # no idea what this is, just copy it
+ tonode.append(copy.deepcopy(c))
+
+
+class CibObjectSet(object):
+ '''
+ Edit or display a set of cib objects.
+ repr() for object representation and
+ save() used to store objects into internal structures
+ are defined in subclasses.
+ '''
+ def __init__(self, *args):
+ self.args = args
+ self._initialize()
+
+ def _initialize(self):
+ rc, self.obj_set = cib_factory.mkobj_set(*self.args)
+ self.search_rc = rc
+ self.all_set = cib_factory.get_all_obj_set()
+ self.obj_ids = orderedset.oset([o.obj_id for o in self.obj_set])
+ self.all_ids = orderedset.oset([o.obj_id for o in self.all_set])
+ self.locked_ids = self.all_ids - self.obj_ids
+
+ def _open_url(self, src):
+ if src == "-":
+ return sys.stdin
+ import urllib.request
+ import urllib.error
+ import urllib.parse
+ try:
+ ret = urllib.request.urlopen(src)
+ return ret
+ except (urllib.error.URLError, ValueError):
+ pass
+ try:
+ ret = open(src)
+ return ret
+ except IOError as e:
+ logger.error("could not open %s: %s", src, e)
+ return False
+
+ def _pre_edit(self, s):
+ '''Extra processing of the string to be edited'''
+ return s
+
+ def _post_edit(self, s):
+ '''Extra processing after editing'''
+ return s
+
+ def _edit_save(self, s):
+ '''
+ Save string s to a tmp file. Invoke editor to edit it.
+ Parse/save the resulting file. In case of a syntax error,
+ allow the user to re-edit.
+ If no changes are done, return silently.
+ '''
+ rc = False
+ try:
+ s = self._pre_edit(s)
+ filehash = hash(s)
+ tmp = str2tmp(s)
+ if not tmp:
+ return False
+ while not rc:
+ if edit_file(tmp) != 0:
+ break
+ s = open(tmp).read()
+ if hash(s) != filehash:
+ ok = self.save(self._post_edit(s))
+ if not ok and config.core.force:
+ logger.error("Save failed and --force is set, aborting edit to avoid infinite loop")
+ elif not ok and ask("Edit or discard changes (yes to edit, no to discard)?"):
+ continue
+ rc = True
+ os.unlink(tmp)
+ except OSError as e:
+ logger.error("unlink(%s) failure: %s", tmp, e)
+ except IOError as msg:
+ logger.error(msg)
+ return rc
+
+ def edit(self):
+ if options.batch:
+ logger.info("edit not allowed in batch mode")
+ return False
+ with clidisplay.nopretty():
+ s = self.repr()
+ # don't allow edit if one or more elements were not
+ # found
+ if not self.search_rc:
+ return self.search_rc
+ return self._edit_save(s)
+
+ def _filter_save(self, fltr, s):
+ '''
+ Pipe string s through a filter. Parse/save the output.
+ If no changes are done, return silently.
+ '''
+ rc, outp = filter_string(fltr, s)
+ if rc != 0:
+ return False
+ if hash(outp) == hash(s):
+ return True
+ return self.save(outp)
+
+ def filter(self, fltr):
+ with clidisplay.nopretty():
+ s = self.repr(format_mode=-1)
+ # don't allow filter if one or more elements were not
+ # found
+ if not self.search_rc:
+ return self.search_rc
+ return self._filter_save(fltr, s)
+
+ def save_to_file(self, fname):
+ f = safe_open_w(fname)
+ if not f:
+ return False
+ rc = True
+ with clidisplay.nopretty():
+ s = self.repr()
+ if s:
+ f.write(s)
+ f.write('\n')
+ elif self.obj_set:
+ rc = False
+ safe_close_w(f)
+ return rc
+
+ def _get_gv_obj(self, gtype):
+ if not self.obj_set:
+ return True, None
+ if gtype not in crm_gv.gv_types:
+ logger.error("graphviz type %s is not supported", gtype)
+ return False, None
+ gv_obj = crm_gv.gv_types[gtype]()
+ set_graph_attrs(gv_obj, ".")
+ return True, gv_obj
+
+ def _graph_repr(self, gv_obj):
+ '''Let CIB elements produce graph elements.
+ '''
+ for obj in processing_sort_cli(list(self.obj_set)):
+ obj.repr_gv(gv_obj, from_grp=False)
+
+ def query_graph(self, *args):
+ "usage: graph <pe> [<gtype> [<file> [<img_format>]]]"
+ rc, gtype, outf, ftype = ui_utils.graph_args(args)
+ if not rc:
+ return None
+ rc, d = utils.load_graphviz_file(userdir.GRAPHVIZ_USER_FILE)
+ if rc and d:
+ constants.graph = d
+ if outf is None:
+ return self.show_graph(gtype)
+ elif gtype == ftype:
+ rc = self.save_graph(gtype, outf)
+ else:
+ rc = self.graph_img(gtype, outf, ftype)
+ return rc
+
+ def show_graph(self, gtype):
+ '''Display graph using dotty'''
+ rc, gv_obj = self._get_gv_obj(gtype)
+ if not rc or not gv_obj:
+ return rc
+ self._graph_repr(gv_obj)
+ return gv_obj.display()
+
+ def graph_img(self, gtype, outf, img_type):
+ '''Render graph to image and save it to a file (done by
+ dot(1))'''
+ rc, gv_obj = self._get_gv_obj(gtype)
+ if not rc or not gv_obj:
+ return rc
+ self._graph_repr(gv_obj)
+ return gv_obj.image(img_type, outf)
+
+ def save_graph(self, gtype, outf):
+ '''Save graph to a file'''
+ rc, gv_obj = self._get_gv_obj(gtype)
+ if not rc or not gv_obj:
+ return rc
+ self._graph_repr(gv_obj)
+ return gv_obj.save(outf)
+
+ def show(self):
+ s = self.repr()
+ if s:
+ page_string(s)
+ return self.search_rc
+
+ def import_file(self, method, fname):
+ '''
+ method: update or replace or push
+ '''
+ if not cib_factory.is_cib_sane():
+ return False
+ f = self._open_url(fname)
+ if not f:
+ return False
+ s = f.read()
+ if f != sys.stdin:
+ f.close()
+ if method == 'push':
+ return self.save(s, remove=True, method='update')
+ else:
+ return self.save(s, remove=False, method=method)
+
+ def repr(self, format_mode=0):
+ '''
+ Return a string with objects's representations (either
+ CLI or XML).
+ '''
+ return ''
+
+ def save(self, s, remove=True, method='replace'):
+ '''
+ For each object:
+ - try to find a corresponding object in obj_set
+ - if (update and not found) or found:
+ replace the object in the obj_set with
+ the new object
+ - if not found: create new
+ See below for specific implementations.
+ '''
+ pass
+
+ def _check_unique_clash(self, set_obj_all):
+ 'Check whether resource parameters with attribute "unique" clash'
+ def process_primitive(prim, clash_dict):
+ '''
+ Update dict clash_dict with
+ (ra_class, ra_provider, ra_type, name, value) -> [ resourcename ]
+ if parameter "name" should be unique
+ '''
+ ra_id = prim.get("id")
+ r_node = reduce_primitive(prim)
+ if r_node is None:
+ return # template not defined yet
+ ra_type = node.get("type")
+ ra_class = node.get("class")
+ ra_provider = node.get("provider")
+ ra = get_ra(r_node)
+ if ra.mk_ra_node() is None: # no RA found?
+ return
+ ra_params = ra.params()
+ for p in r_node.xpath("./instance_attributes/nvpair"):
+ name, value = p.get("name"), p.get("value")
+ if value is None:
+ continue
+ # don't fail if the meta-data doesn't contain the
+ # expected attributes
+ if name in ra_params and ra_params[name].get("unique") == "1":
+ clash_dict[(ra_class, ra_provider, ra_type, name, value)].append(ra_id)
+ return
+ # we check the whole CIB for clashes as a clash may originate between
+ # an object already committed and a new one
+ check_set = set([o.obj_id
+ for o in self.obj_set
+ if o.obj_type == "primitive"])
+ if not check_set:
+ return 0
+ clash_dict = collections.defaultdict(list)
+ for obj in set_obj_all.obj_set:
+ node = obj.node
+ if is_primitive(node):
+ process_primitive(node, clash_dict)
+ # but we only warn if a 'new' object is involved
+ rc = 0
+ for param, resources in list(clash_dict.items()):
+ # at least one new object must be involved
+ if len(resources) > 1 and len(set(resources) & check_set) > 0:
+ rc = 2
+ msg = 'Resources %s violate uniqueness for parameter "%s": "%s"' % (
+ ",".join(sorted(resources)), param[3], param[4])
+ logger.warning(msg)
+ return rc
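+ # Example (resource names assumed): two IPaddr2 primitives vip1 and vip2
+ # both configured with ip=10.0.0.10, a parameter whose RA meta-data marks
+ # unique="1", would log:
+ #   'Resources vip1,vip2 violate uniqueness for parameter "ip": "10.0.0.10"'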
+
+ def semantic_check(self, set_obj_all):
+ '''
+ Test objects for sanity. This is about semantics.
+ '''
+ rc = self._check_unique_clash(set_obj_all)
+ for obj in sorted(self.obj_set, key=lambda x: x.obj_id):
+ rc |= obj.check_sanity()
+ return rc
+
+
+class CibObjectSetCli(CibObjectSet):
+ '''
+ Edit or display a set of cib objects (using cli notation).
+ '''
+ vim_stx_str = "# vim: set filetype=pcmk:\n"
+
+ def __init__(self, *args):
+ CibObjectSet.__init__(self, *args)
+
+ def repr_nopretty(self, format_mode=1):
+ with clidisplay.nopretty():
+ return self.repr(format_mode=format_mode)
+
+ def repr(self, format_mode=1):
+ "Return a string containing cli format of all objects."
+ if not self.obj_set:
+ return ''
+ return '\n'.join(obj.repr_cli(format_mode=format_mode)
+ for obj in processing_sort_cli(list(self.obj_set)))
+
+ def _pre_edit(self, s):
+ '''Extra processing of the string to be edited'''
+ if config.core.editor.startswith("vi"):
+ return "%s\n%s" % (s, self.vim_stx_str)
+ return s
+
+ def _post_edit(self, s):
+ if config.core.editor.startswith("vi"):
+ return s.replace(self.vim_stx_str, "")
+ return s
+
+ def _get_id(self, node):
+ '''
+ Get the id from a CLI representation. Normally, it should
+ be the value of the id attribute, but sometimes the
+ attribute is missing.
+ '''
+ if node.tag == 'fencing-topology':
+ return 'fencing_topology'
+ if node.tag in constants.defaults_tags:
+ return node[0].get('id')
+ return node.get('id')
+
+ def save(self, s, remove=True, method='replace'):
+ '''
+ Save a user supplied cli format configuration.
+ On errors, the user is typically asked to review the
+ configuration (for instance when editing).
+
+ On errors, the user is asked to edit again (if we're
+ coming from edit). The original CIB is preserved and no
+ changes are made.
+ '''
+ diff = CibDiff(self)
+ rc = True
+ comments = []
+ with logger_utils.line_number():
+ for cli_text in lines2cli(s):
+ logger_utils.incr_lineno()
+ node = parse.parse(cli_text, comments=comments)
+ if node not in (False, None):
+ rc = rc and diff.add(node)
+ elif node is False:
+ rc = False
+
+ # we can't proceed if there was a syntax error, but we
+ # can ask the user to fix problems
+ if not rc:
+ return rc
+
+ rc = diff.apply(cib_factory, mode='cli', remove=remove, method=method)
+ if not rc:
+ self._initialize()
+ return rc
+
+
+class CibObjectSetRaw(CibObjectSet):
+ '''
+ Edit or display one or more CIB objects (XML).
+ '''
+ def __init__(self, *args):
+ CibObjectSet.__init__(self, *args)
+
+ def repr(self, format_mode="ignored"):
+ "Return a string containing xml of all objects."
+ cib_elem = cib_factory.obj_set2cib(self.obj_set)
+
+ from .utils import obscured
+ for nvp in cib_elem.xpath('//nvpair'):
+ if 'value' in nvp.attrib:
+ nvp.set('value', obscured(nvp.get('name'), nvp.get('value')))
+
+ s = xml_tostring(cib_elem, pretty_print=True)
+ return '<?xml version="1.0" ?>\n' + s
+
+ def _get_id(self, node):
+ if node.tag == "fencing-topology":
+ return "fencing_topology"
+ return node.get("id")
+
+ def save(self, s, remove=True, method='replace'):
+ try:
+ cib_elem = etree.fromstring(s)
+ except etree.ParseError as msg:
+ logger_utils.text_xml_parse_err(msg, s)
+ return False
+ sanitize_cib(cib_elem)
+ if not show_unrecognized_elems(cib_elem):
+ return False
+ rc = True
+ diff = CibDiff(self)
+ for node in get_top_cib_nodes(cib_elem, []):
+ rc = diff.add(node)
+ if not rc:
+ return rc
+ rc = diff.apply(cib_factory, mode='xml', remove=remove, method=method)
+ if not rc:
+ self._initialize()
+ return rc
+
+ def verify(self):
+ if not self.obj_set:
+ return True
+ with clidisplay.nopretty():
+ cib = self.repr(format_mode=-1)
+ rc = cibverify.verify(cib)
+
+ if rc not in (0, 1):
+ logger.debug("verify (rc=%s): %s", rc, cib)
+ return rc in (0, 1)
+
+ def ptest(self, nograph, scores, utilization, actions, verbosity):
+ if not cib_factory.is_cib_sane():
+ return False
+ cib_elem = cib_factory.obj_set2cib(self.obj_set)
+ status = cibstatus.cib_status.get_status()
+ if status is None:
+ logger.error("no status section found")
+ return False
+ cib_elem.append(copy.deepcopy(status))
+ graph_s = etree.tostring(cib_elem)
+ return run_ptest(graph_s, nograph, scores, utilization, actions, verbosity)
+
+
+def find_comment_nodes(node):
+ return [c for c in node.iterchildren() if is_comment(c)]
+
+
+def fix_node_ids(node, oldnode):
+ """
+ Fills in missing ids, getting ids from oldnode
+ as much as possible. Tries to generate reasonable
+ ids as well.
+ """
+ hint_map = {
+ 'node': 'node',
+ 'primitive': 'rsc',
+ 'template': 'rsc',
+ 'master': 'grp',
+ 'group': 'grp',
+ 'clone': 'grp',
+ 'rsc_location': 'location',
+ 'fencing-topology': 'fencing',
+ 'tags': 'tag',
+ 'alerts': 'alert',
+ }
+
+ idless = set([
+ 'operations', 'fencing-topology', 'network', 'docker', 'rkt',
+ 'storage', 'select', 'select_attributes', 'select_fencing',
+ 'select_nodes', 'select_resources'
+ ])
+ isref = set(['resource_ref', 'obj_ref', 'crmsh-ref'])
+
+ def needs_id(node):
+ a = node.attrib
+ if node.tag in isref:
+ return False
+ return 'id-ref' not in a and node.tag not in idless
+
+ def next_prefix(node, refnode, prefix):
+ if node.tag == 'node' and 'uname' in node.attrib:
+ return node.get('uname')
+ if 'id' in node.attrib:
+ return node.get('id')
+ return prefix
+
+ def recurse(node, oldnode, prefix):
+ refnode = lookup_node(node, oldnode)
+ if needs_id(node):
+ idmgmt.set_id(node, refnode, prefix, id_required=(node.tag not in idless))
+ prefix = next_prefix(node, refnode, prefix)
+ for c in node.iterchildren():
+ if not is_comment(c):
+ recurse(c, refnode if refnode is not None else oldnode, prefix)
+
+ recurse(node, oldnode, hint_map.get(node.tag, ''))
+
+
+def resolve_idref(node):
+ """
+ resolve id-ref references that refer
+ to object ids, not attribute lists
+ """
+ id_ref = node.get('id-ref')
+ attr_list_type = node.tag
+ obj = cib_factory.find_object(id_ref)
+ if obj:
+ nodes = obj.node.xpath(".//%s" % attr_list_type)
+ if len(nodes) > 1:
+ logger.warning("%s contains more than one %s, using first", obj.obj_id, attr_list_type)
+ if len(nodes) > 0:
+ node_id = nodes[0].get("id")
+ if node_id:
+ return node_id
+ check_id_ref(cib_factory.get_cib(), id_ref)
+ return id_ref
+
+
+def resolve_references(node):
+ """
+ In the output from parse(), there are
+ possible references to other nodes in
+ the CIB. This resolves those references.
+ """
+ idrefnodes = node.xpath('.//*[@id-ref]')
+ if 'id-ref' in node.attrib:
+ idrefnodes += [node]
+ for ref in idrefnodes:
+ ref.set('id-ref', resolve_idref(ref))
+ for ref in node.iterchildren('crmsh-ref'):
+ child_id = ref.get('id')
+ # TODO: This always refers to a resource ATM.
+ # Handle case where it may refer to a node name?
+ obj = cib_factory.find_resource(child_id)
+ logger.debug("resolve_references: %s -> %s", child_id, obj)
+ if obj is not None:
+ newnode = copy.deepcopy(obj.node)
+ node.replace(ref, newnode)
+ else:
+ node.remove(ref)
+ logger.error("%s refers to missing object %s", node.get('id'), child_id)
+
+
+def id_for_node(node, id_hint=None):
+ "find id for unprocessed node"
+ root = node
+ if node.tag in constants.defaults_tags:
+ node = node[0]
+ if node.tag == 'fencing-topology':
+ obj_id = 'fencing_topology'
+ else:
+ obj_id = node.get('id') or node.get('uname')
+ if obj_id is None:
+ if node.tag == 'op':
+ if id_hint is None:
+ id_hint = node.get("rsc")
+ idmgmt.set_id(node, None, id_hint)
+ obj_id = node.get('id')
+ else:
+ defid = default_id_for_tag(root.tag)
+ if defid is not None:
+ try:
+ node.set('id', defid)
+ except TypeError as e:
+ raise ValueError('Internal error: %s (%s)' % (e, xml_tostring(node)))
+ obj_id = node.get('id')
+ idmgmt.save(obj_id)
+ if root.tag != "node" and obj_id and not is_id_valid(obj_id):
+ logger_utils.invalid_id_err(obj_id)
+ return None
+ return obj_id
+
+
+def postprocess_cli(node, oldnode=None, id_hint=None, complete_advised=False):
+ """
+ input: unprocessed but parsed XML
+ output: XML, obj_type, obj_id
+ """
+ if node.tag == 'op':
+ obj_type = 'op'
+ else:
+ obj_type = cib_object_map[node.tag][0]
+ obj_id = id_for_node(node, id_hint=id_hint)
+ if obj_id is None:
+ if obj_type == 'op':
+ # In this case, we need to delay postprocessing
+ # until we know where to insert the op
+ return node, obj_type, None
+ logger.error("No ID found for %s: %s", obj_type, xml_tostring(node))
+ return None, None, None
+ if node.tag in constants.defaults_tags:
+ node = node[0]
+ fix_node_ids(node, oldnode)
+ resolve_references(node)
+ if oldnode is not None:
+ remove_id_used_attributes(oldnode)
+ if complete_advised:
+ complete_advised_meta(node)
+ return node, obj_type, obj_id
+
+
+def complete_advised_meta(node):
+ """
+ Complete advised meta attributes
+ """
+ if node.tag != "clone":
+ return
+ primitive_list = node.xpath('primitive')
+ if not primitive_list:
+ return
+ set_list = []
+ for meta_item in ["promotable", "interleave"]:
+ if not is_attr_set(node, meta_item):
+ set_list.append(meta_item)
+ if not set_list:
+ return
+
+ meta_node = get_set_nodes(node, "meta_attributes", create=True)[0]
+ p = primitive_list[0]
+ ra_inst = RAInfo(p.get('class'), p.get('type'), p.get('provider'))
+ ra_actions_dict = ra_inst.actions()
+ if ra_actions_dict and "promote" in ra_actions_dict and "demote" in ra_actions_dict:
+ for item in set_list:
+ set_attr(meta_node, item, "true")
+ # Add interleave=true as long as it's not set, whether or not it's a promotable clone
+ elif "interleave" in set_list:
+ set_attr(meta_node, "interleave", "true")
+
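+ # Example (resource agent assumed): for a clone wrapping a primitive whose
+ # RA advertises promote/demote actions, this adds meta promotable=true and
+ # interleave=true; for a clone of a plain RA only interleave=true is added
+ # when unset.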
+
+def parse_cli_to_xml(cli, oldnode=None):
+ """
+ input: CLI text
+ output: XML, obj_type, obj_id
+ """
+ node = None
+ complete = False
+ comments = []
+ if isinstance(cli, str):
+ utils.auto_convert_role = False
+ for s in lines2cli(cli):
+ node = parse.parse(s, comments=comments)
+ else: # should be a pre-tokenized list
+ utils.auto_convert_role = True
+ complete = True
+ node = parse.parse(cli, comments=comments, ignore_empty=False, complete_advised=complete)
+ if node is False:
+ return None, None, None
+ elif node is None:
+ return None, None, None
+ return postprocess_cli(node, oldnode, complete_advised=complete)
+
+#
+# cib element classes (CibObject the parent class)
+#
+class CibObject(object):
+ '''
+ The top level object of the CIB. Resources and constraints.
+ '''
+ state_fmt = "%16s %-8s%-8s%-8s%-4s"
+ set_names = {}
+
+ def __init__(self, xml_obj_type):
+ if xml_obj_type not in cib_object_map:
+ logger_utils.unsupported_err(xml_obj_type)
+ return
+ self.obj_type = cib_object_map[xml_obj_type][0]
+ self.parent_type = cib_object_map[xml_obj_type][2]
+ self.xml_obj_type = xml_obj_type
+ self.origin = "" # where did it originally come from?
+ self.nocli = False # we don't support this one
+ self.nocli_warn = True # don't issue warnings all the time
+ self.updated = False # was the object updated
+ self.parent = None # object superior (group/clone/ms)
+ self.children = [] # objects inferior
+ self.obj_id = None
+ self.node = None
+
+ def __str__(self):
+ return "%s:%s" % (self.obj_type, self.obj_id)
+
+ def set_updated(self):
+ self.updated = True
+ self.propagate_updated()
+
+ def dump_state(self):
+ 'Print object status'
+ print(self.state_fmt % (self.obj_id,
+ self.origin,
+ self.updated,
+ self.parent and self.parent.obj_id or "",
+ len(self.children)))
+
+ def _repr_cli_xml(self, format_mode):
+ with clidisplay.nopretty(format_mode < 0):
+ h = clidisplay.keyword("xml")
+ l = xml_tostring(self.node, pretty_print=True).split('\n')
+ l = [x for x in l if x] # drop empty lines
+ return "%s %s" % (h, cli_format(l, break_lines=(format_mode > 0), xml=True))
+
+ def _gv_rsc_id(self):
+ if self.parent and self.parent.obj_type in constants.clonems_tags:
+ return "%s:%s" % (self.parent.obj_type, self.obj_id)
+ return self.obj_id
+
+ def _set_gv_attrs(self, gv_obj, obj_type=None):
+ if not obj_type:
+ obj_type = self.obj_type
+ obj_id = self.node.get("uname") or self.obj_id
+ set_obj_attrs(gv_obj, obj_id, obj_type)
+
+ def _set_sg_attrs(self, sg_obj, obj_type=None):
+ if not obj_type:
+ obj_type = self.obj_type
+ set_graph_attrs(sg_obj, obj_type)
+
+ def _set_edge_attrs(self, gv_obj, e_id, obj_type=None):
+ if not obj_type:
+ obj_type = self.obj_type
+ set_edge_attrs(gv_obj, e_id, obj_type)
+
+ def repr_gv(self, gv_obj, from_grp=False):
+ '''
+ Add some graphviz elements to gv_obj.
+ '''
+ pass
+
+ def normalize_parameters(self):
+ pass
+
+ def _repr_cli_head(self, format_mode):
+ 'implemented in subclasses'
+ pass
+
+ def repr_cli(self, format_mode=1):
+ '''
+ CLI representation for the node.
+ _repr_cli_head and _repr_cli_child in subclasses.
+ '''
+ if self.nocli:
+ return self._repr_cli_xml(format_mode)
+ l = []
+ with clidisplay.nopretty(format_mode < 0):
+ head_s = self._repr_cli_head(format_mode)
+ # everybody must have a head
+ if not head_s:
+ return None
+ comments = []
+ l.append(head_s)
+ desc = self.node.get("description")
+ if desc:
+ l.append(nvpair_format("description", desc))
+ for c in self.node.iterchildren():
+ if is_comment(c):
+ comments.append(c.text)
+ continue
+ s = self._repr_cli_child(c, format_mode)
+ if s:
+ l.append(s)
+ return self._cli_format_and_comment(l, comments, format_mode=format_mode)
+
+ def _attr_set_str(self, node):
+ '''
+ Add $id=<id> if the set id is referenced by another
+ element.
+
+ also show rule expressions if found
+ '''
+
+ idref = node.get('id-ref')
+
+ # note: empty sets are deliberately not skipped here,
+ # since skipping them breaks patching
+
+ ret = "%s " % (clidisplay.keyword(self.set_names[node.tag]))
+ node_id = node.get("id")
+ if node_id is not None and cib_factory.is_id_refd(node.tag, node_id):
+ ret += "%s " % (nvpair_format("$id", node_id))
+ elif idref is not None:
+ ret += "%s " % (nvpair_format("$id-ref", idref))
+
+ if node.tag in ["docker", "network"]:
+ for item in node.keys():
+ ret += "%s " % nvpair_format(item, node.get(item))
+ if node.tag == "primitive":
+ ret += node.get('id')
+ for _type in ["port-mapping", "storage-mapping"]:
+ for c in node.iterchildren(_type):
+ ret += "%s " % _type
+ for item in c.keys():
+ ret += "%s " % nvpair_format(item, c.get(item))
+
+ score = node.get("score")
+ if score:
+ ret += "%s: " % (clidisplay.score(score))
+
+ for c in node.iterchildren():
+ if c.tag == "rule":
+ ret += "%s %s " % (clidisplay.keyword("rule"), cli_rule(c))
+ for c in node.iterchildren():
+ if c.tag == "nvpair":
+ ret += "%s " % (cli_nvpair(c))
+ if ret[-1] == ' ':
+ ret = ret[:-1]
+ return ret
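+
+ # Typical renderings (illustrative, assumed set contents): a
+ # meta_attributes set with a single nvpair comes out as
+ # "meta target-role=Stopped", and a set referencing another one
+ # as "meta $id-ref=other-set".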
+
+ def _repr_cli_child(self, c, format_mode):
+ if c.tag in self.set_names:
+ return self._attr_set_str(c)
+
+ def _get_oldnode(self):
+ '''
+ Used to retrieve sub ids.
+ '''
+ if self.obj_type == "property":
+ return get_topnode(cib_factory.get_cib(), self.parent_type)
+ elif self.obj_type in constants.defaults_tags:
+ return self.node.getparent()
+ return self.node
+
+ def set_id(self, obj_id=None):
+ if obj_id is None and self.node is not None:
+ obj_id = self.node.get("id") or self.node.get('uname')
+ if obj_id is None:
+ m = cib_object_map.get(self.node.tag)
+ if m and len(m) > 3:
+ obj_id = m[3]
+ self.obj_id = obj_id
+
+ def set_nodeid(self):
+ if self.node is not None and self.obj_id:
+ self.node.set("id", self.obj_id)
+
+ def cli2node(self, cli):
+ '''
+ Convert CLI representation to a DOM node.
+ '''
+ oldnode = self._get_oldnode()
+ node, obj_type, obj_id = parse_cli_to_xml(cli, oldnode)
+ return node
+
+ def set_node(self, node, oldnode=None):
+ self.node = node
+ self.set_id()
+ return self.node
+
+ def _cli_format_and_comment(self, l, comments, format_mode):
+ '''
+ Format and add comment (if any).
+ '''
+ s = cli_format(l, break_lines=(format_mode > 0))
+ cs = '\n'.join(comments)
+ if len(comments) and format_mode >= 0:
+ return '\n'.join([cs, s])
+ return s
+
+ def move_comments(self):
+ '''
+ Move comments to the top of the node.
+ '''
+ l = []
+ firstelem = None
+ for n in self.node.iterchildren():
+ if is_comment(n):
+ if firstelem is not None:
+ l.append(n)
+ else:
+ if firstelem is None:
+ firstelem = self.node.index(n)
+ for comm_node in l:
+ self.node.remove(comm_node)
+ self.node.insert(firstelem, comm_node)
+ firstelem += 1
+
+ def mknode(self, obj_id):
+ if self.xml_obj_type in constants.defaults_tags:
+ tag = "meta_attributes"
+ else:
+ tag = self.xml_obj_type
+ self.node = etree.Element(tag)
+ self.set_id(obj_id)
+ self.set_nodeid()
+ self.origin = "user"
+ return True
+
+ def can_be_renamed(self):
+ '''
+ Return False if this object can't be renamed.
+ '''
+ if self.obj_id is None:
+ return False
+ rscstat = RscState()
+ if not rscstat.can_delete(self.obj_id):
+ logger.error("cannot rename a running resource (%s)", self.obj_id)
+ return False
+ if not is_live_cib() and self.node.tag == "node":
+ logger.error("cannot rename nodes")
+ return False
+ return True
+
+ def cli_use_validate(self):
+ '''
+ Check validity of the object, as we know it. It may
+ happen that we don't recognize a construct, but that the
+ object is still valid for the CRM. In that case, the
+ object is marked as "CLI read only", i.e. we will neither
+ convert it to CLI nor try to edit it in that format.
+
+ The validation procedure:
+ we convert xml to cli and then back to xml. If the two
+ xml representations match then we can understand the xml.
+
+ Complication:
+ There are valid variations of the XML where the CLI syntax
+ cannot express the difference. For example, sub-tags in a
+ <primitive> are not ordered, but the CLI syntax can only express
+ one specific ordering.
+
+ This is usually not a problem unless mixing pcs and crmsh.
+ '''
+ if self.node is None:
+ return True
+ with clidisplay.nopretty():
+ cli_text = self.repr_cli(format_mode=0)
+ if not cli_text:
+ logger.debug("validation failed: %s", xml_tostring(self.node))
+ return False
+ xml2 = self.cli2node(cli_text)
+ if xml2 is None:
+ logger.debug("validation failed: %s -> %s", xml_tostring(self.node), cli_text)
+ return False
+ if not xml_equals(self.node, xml2, show=True):
+ logger.debug("validation failed: %s -> %s -> %s", xml_tostring(self.node), cli_text, xml_tostring(xml2))
+ return False
+ return True
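+
+ # The check above is a round trip (sketch):
+ # self.node --repr_cli()--> cli_text --cli2node()--> xml2
+ # and the object remains CLI-editable only if
+ # xml_equals(self.node, xml2) holds.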
+
+ def _verify_op_attributes(self, op_node):
+ '''
+ Check if all operation attributes are supported by the
+ schema.
+ '''
+ rc = 0
+ op_id = op_node.get("name")
+ for name in list(op_node.keys()):
+ vals = schema.rng_attr_values(op_node.tag, name)
+ if not vals:
+ continue
+ v = op_node.get(name)
+ if v not in vals:
+ logger.warning("%s: op '%s' attribute '%s' value '%s' not recognized", self.obj_id, op_id, name, v)
+ rc = 1
+ return rc
+
+ def _check_ops_attributes(self):
+ '''
+ Check if operation attributes settings are valid.
+ '''
+ rc = 0
+ if self.node is None:
+ return rc
+ for op_node in self.node.xpath("operations/op"):
+ rc |= self._verify_op_attributes(op_node)
+ return rc
+
+ def check_sanity(self):
+ '''
+ Right now, this is only for primitives.
+ And groups/clones/ms and cluster properties.
+ '''
+ return 0
+
+ def reset_updated(self):
+ self.updated = False
+ for child in self.children:
+ child.reset_updated()
+
+ def propagate_updated(self):
+ if self.parent:
+ self.parent.updated = self.updated
+ self.parent.propagate_updated()
+
+ def top_parent(self):
+ '''Return the top parent or self'''
+ if self.parent:
+ return self.parent.top_parent()
+ else:
+ return self
+
+ def meta_attributes(self, name):
+ "Returns all meta attribute values with the given name"
+ v = self.node.xpath('./meta_attributes/nvpair[@name="%s"]/@value' % (name))
+ return v
+
+ def find_child_in_node(self, child):
+ for c in self.node.iterchildren():
+ if c.tag == child.node.tag and \
+ c.get("id") == child.obj_id:
+ return c
+ return None
+
+
+def gv_first_prim(node):
+ if node.tag != "primitive":
+ for c in node.iterchildren():
+ if is_child_rsc(c):
+ return gv_first_prim(c)
+ return node.get("id")
+
+
+def gv_first_rsc(rsc_id):
+ rsc_obj = cib_factory.find_object(rsc_id)
+ if not rsc_obj:
+ return rsc_id
+ return gv_first_prim(rsc_obj.node)
+
+
+def gv_last_prim(node):
+ if node.tag != "primitive":
+ for c in node.iterchildren(reversed=True):
+ if is_child_rsc(c):
+ return gv_last_prim(c)
+ return node.get("id")
+
+
+def gv_last_rsc(rsc_id):
+ rsc_obj = cib_factory.find_object(rsc_id)
+ if not rsc_obj:
+ return rsc_id
+ return gv_last_prim(rsc_obj.node)
+
+
+def gv_edge_score_label(gv_obj, e_id, node):
+ score = get_score(node) or get_kind(node)
+ if abs_pos_score(score):
+ gv_obj.new_edge_attr(e_id, 'style', 'solid')
+ return
+ elif re.match("-?([0-9]+|inf)$", score):
+ lbl = score
+ elif score in schema.rng_attr_values('rsc_order', 'kind'):
+ lbl = score
+ elif not score:
+ lbl = 'Adv'
+ else:
+ lbl = "attr:%s" % score
+ gv_obj.new_edge_attr(e_id, 'label', lbl)
+
+
+class CibNode(CibObject):
+ '''
+ Node and node's attributes.
+ '''
+ set_names = {
+ "instance_attributes": "attributes",
+ "utilization": "utilization",
+ }
+
+ def _repr_cli_head(self, format_mode):
+ uname = self.node.get("uname")
+ s = clidisplay.keyword(self.obj_type)
+ if self.obj_id != uname:
+ if utils.noquotes(self.obj_id):
+ s = "%s %s:" % (s, self.obj_id)
+ else:
+ s = '%s $id="%s"' % (s, self.obj_id)
+ s = '%s %s' % (s, clidisplay.ident(uname))
+ node_type = self.node.get("type")
+ if node_type and node_type != constants.node_default_type:
+ s = '%s:%s' % (s, node_type)
+ return s
+
+ def repr_gv(self, gv_obj, from_grp=False):
+ '''
+ Create a gv node. The label consists of the ID.
+ Nodes are square.
+ '''
+ uname = self.node.get("uname")
+ if not uname:
+ uname = self.obj_id
+ gv_obj.new_node(uname, top_node=True)
+ gv_obj.new_attr(uname, 'label', uname)
+ self._set_gv_attrs(gv_obj)
+
+
+def reduce_primitive(node):
+ '''
+ A primitive may reference a template. If so, merge the two
+ together.
+ Returns:
+ - the node itself, if there is no template reference
+ - None, if a template is referenced but cannot be found
+ - otherwise, the primitive node merged into the template node
+ '''
+ template = node.get("template")
+ if not template:
+ return node
+ template_obj = cib_factory.find_object(template)
+ if not template_obj:
+ return None
+ return merge_tmpl_into_prim(node, template_obj.node)
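+
+# Illustrative example (assumed ids and params): a primitive that
+# references a template, e.g.
+# primitive p1 @web-template params port=8080
+# reduces to a merged node carrying both the template's attributes and
+# its own, so later sanity checks see the full parameter set.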
+
+
+class Op(object):
+ '''
+ Operations.
+ '''
+ elem_type = "op"
+
+ def __init__(self, op_name, prim, node=None):
+ self.prim = prim
+ self.node = node
+ self.attr_d = ordereddict.odict()
+ self.attr_d["name"] = op_name
+ if self.node is not None:
+ self.xml2dict()
+
+ def set_attr(self, n, v):
+ self.attr_d[n] = v
+
+ def get_attr(self, n):
+ try:
+ return self.attr_d[n]
+ except KeyError:
+ return None
+
+ def del_attr(self, n):
+ try:
+ del self.attr_d[n]
+ except KeyError:
+ pass
+
+ def xml2dict(self):
+ for name in list(self.node.keys()):
+ if name != "id": # skip the id
+ self.set_attr(name, self.node.get(name))
+ for p in self.node.xpath("instance_attributes/nvpair"):
+ n = p.get("name")
+ v = p.get("value")
+ if n is not None and v is not None:
+ self.set_attr(n, v)
+
+ def mkxml(self):
+ # create an xml node
+ if self.node is not None:
+ if self.node.getparent() is not None:
+ self.node.getparent().remove(self.node)
+ idmgmt.remove_xml(self.node)
+ self.node = etree.Element(self.elem_type)
+ inst_attr = {}
+ valid_attrs = olist(schema.get('attr', 'op', 'a'))
+ for n, v in self.attr_d.items():
+ if n in valid_attrs:
+ self.node.set(n, v)
+ else:
+ inst_attr[n] = v
+ idmgmt.set_id(self.node, None, self.prim)
+ if inst_attr:
+ nia = mkxmlnvpairs("instance_attributes", inst_attr, self.node.get("id"))
+ self.node.append(nia)
+ return self.node
+
+
+class CibOp(CibObject):
+ '''
+ Operations
+ '''
+
+ set_names = {
+ "instance_attributes": "op_params",
+ "meta_attributes": "op_meta"
+ }
+
+ def _repr_cli_head(self, format_mode):
+ action, pl = op2list(self.node)
+ if not action:
+ return ""
+ ret = ["%s %s" % (clidisplay.keyword("op"), action)]
+ ret += [nvpair_format(n, v) for n, v in pl]
+ return ' '.join(ret)
+
+
+class CibPrimitive(CibObject):
+ '''
+ Primitives.
+ '''
+
+ set_names = {
+ "instance_attributes": "params",
+ "meta_attributes": "meta",
+ "utilization": "utilization",
+ }
+
+ def _repr_cli_head(self, format_mode):
+ if self.obj_type == "primitive":
+ template_ref = self.node.get("template")
+ else:
+ template_ref = None
+ if template_ref:
+ rsc_spec = "@%s" % clidisplay.idref(template_ref)
+ else:
+ rsc_spec = mk_rsc_type(self.node)
+ s = clidisplay.keyword(self.obj_type)
+ ident = clidisplay.ident(self.obj_id)
+ return "%s %s %s" % (s, ident, rsc_spec)
+
+ def _repr_cli_child(self, c, format_mode):
+ if c.tag in self.set_names:
+ return self._attr_set_str(c)
+ elif c.tag == "operations":
+ l = []
+ s = ''
+ c_id = c.get("id")
+ if c_id:
+ s = nvpair_format('$id', c_id)
+ idref = c.get("id-ref")
+ if idref:
+ s = '%s %s' % (s, nvpair_format('$id-ref', idref))
+ if s:
+ l.append("%s %s" % (clidisplay.keyword("operations"), s))
+ for op_node in c.iterchildren():
+ op_obj = cib_object_map[op_node.tag][1](op_node.tag)
+ op_obj.set_node(op_node)
+ l.append(op_obj.repr_cli(format_mode > 0))
+ return cli_format(l, break_lines=(format_mode > 0))
+
+ def _append_op(self, op_node):
+ try:
+ ops_node = self.node.findall("operations")[0]
+ except IndexError:
+ ops_node = etree.SubElement(self.node, "operations")
+ ops_node.append(op_node)
+
+ def add_operation(self, node):
+ # check if there is already an op with the same interval
+ name = node.get("name")
+ interval = node.get("interval")
+ if find_operation(self.node, name, interval) is not None:
+ logger.error("%s already has a %s op with interval %s", self.obj_id, name, interval)
+ return None
+ # create an xml node
+ if 'id' not in node.attrib:
+ idmgmt.set_id(node, None, self.obj_id)
+ valid_attrs = olist(schema.get('attr', 'op', 'a'))
+ inst_attr = {}
+ for attr in list(node.attrib.keys()):
+ if attr not in valid_attrs:
+ inst_attr[attr] = node.attrib[attr]
+ del node.attrib[attr]
+ if inst_attr:
+ attr_nodes = node.xpath('./instance_attributes')
+ if len(attr_nodes) == 1:
+ fill_nvpairs("instance_attributes", attr_nodes[0], inst_attr, node.get("id"))
+ else:
+ nia = mkxmlnvpairs("instance_attributes", inst_attr, node.get("id"))
+ node.append(nia)
+
+ self._append_op(node)
+ comments = find_comment_nodes(node)
+ for comment in comments:
+ node.remove(comment)
+ if comments and self.node is not None:
+ stuff_comments(self.node, [c.text for c in comments])
+ self.set_updated()
+ return self
+
+ def del_operation(self, op_node):
+ if op_node.getparent() is None:
+ return
+ ops_node = op_node.getparent()
+ op_node.getparent().remove(op_node)
+ idmgmt.remove_xml(op_node)
+ if len(ops_node) == 0:
+ rmnode(ops_node)
+ self.set_updated()
+
+ def is_dummy_operation(self, op_node):
+ '''If the op has just name, id, and interval=0, then it's
+ not of much use.'''
+ interval = op_node.get("interval")
+ if len(op_node) == 0 and crm_msec(interval) == 0:
+ attr_names = set(op_node.keys())
+ basic_attr_names = set(["id", "name", "interval"])
+ if len(attr_names ^ basic_attr_names) == 0:
+ return True
+ return False
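+
+ # Example of a dummy op in XML terms (illustrative):
+ # <op id="d0-start-0" name="start" interval="0"/>
+ # it carries nothing beyond what pacemaker assumes anyway.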
+
+ def set_op_attr(self, op_node, attr_n, attr_v):
+ name = op_node.get("name")
+ op_obj = Op(name, self.obj_id, op_node)
+ op_obj.set_attr(attr_n, attr_v)
+ new_op_node = op_obj.mkxml()
+ self._append_op(new_op_node)
+ # the resource is updated
+ self.set_updated()
+ return new_op_node
+
+ def del_op_attr(self, op_node, attr_n):
+ name = op_node.get("name")
+ op_obj = Op(name, self.obj_id, op_node)
+ op_obj.del_attr(attr_n)
+ new_op_node = op_obj.mkxml()
+ self._append_op(new_op_node)
+ self.set_updated()
+ return new_op_node
+
+ def normalize_parameters(self):
+ """
+ Normalize parameter names:
+ If a parameter "foo-bar" is set but the
+ agent doesn't have a parameter "foo-bar",
+ and instead has a parameter "foo_bar", then
+ change the name to set the value of "foo_bar"
+ instead.
+ """
+ r_node = self.node
+ if self.obj_type == "primitive":
+ r_node = reduce_primitive(self.node)
+ if r_node is None:
+ return
+ ra = get_ra(r_node)
+ ra.normalize_parameters(r_node)
+
+ def check_sanity(self):
+ '''
+ Check operation timeouts and if all required parameters
+ are defined.
+ '''
+ if self.node is None: # eh?
+ logger.error("%s: no xml (strange)", self.obj_id)
+ return utils.get_check_rc()
+ rc3 = sanity_check_meta(self.obj_id, self.node, constants.rsc_meta_attributes)
+ if self.obj_type == "primitive":
+ r_node = reduce_primitive(self.node)
+ if r_node is None:
+ logger.error("%s: no such resource template", self.node.get("template"))
+ return utils.get_check_rc()
+ else:
+ r_node = self.node
+ ra = get_ra(r_node)
+ if ra.mk_ra_node() is None: # no RA found?
+ if cib_factory.is_asymm_cluster():
+ return rc3
+ if config.core.ignore_missing_metadata:
+ return rc3
+ ra.error("no such resource agent")
+ return utils.get_check_rc()
+ actions = get_rsc_operations(r_node)
+ default_timeout = get_default_timeout()
+ rc2 = ra.sanity_check_ops(self.obj_id, actions, default_timeout)
+ rc4 = self._check_ops_attributes()
+ params = []
+ for c in r_node.iterchildren("instance_attributes"):
+ params += nvpairs2list(c)
+ rc1 = ra.sanity_check_params(self.obj_id,
+ params,
+ existence_only=(self.obj_type != "primitive"))
+ return rc1 | rc2 | rc3 | rc4
+
+ def repr_gv(self, gv_obj, from_grp=False):
+ '''
+ Create a gv node. The label consists of the ID and the
+ RA type.
+ '''
+ if self.obj_type == "primitive":
+ # if we belong to a group, but were not called with
+ # from_grp=True, then skip
+ if not from_grp and self.parent and self.parent.obj_type == "group":
+ return
+ n = reduce_primitive(self.node)
+ if n is None:
+ raise ValueError("Referenced template not found")
+ ra_class = n.get("class")
+ ra_type = n.get("type")
+ lbl_top = self._gv_rsc_id()
+ if ra_class in ("ocf", "stonith"):
+ lbl_bottom = ra_type
+ else:
+ lbl_bottom = "%s:%s" % (ra_class, ra_type)
+ gv_obj.new_node(self.obj_id, norank=(ra_class == "stonith"))
+ gv_obj.new_attr(self.obj_id, 'label', '%s\\n%s' % (lbl_top, lbl_bottom))
+ self._set_gv_attrs(gv_obj)
+ self._set_gv_attrs(gv_obj, "class:%s" % ra_class)
+ # if it's clone/ms, then get parent graph attributes
+ if self.parent and self.parent.obj_type in constants.clonems_tags:
+ self._set_gv_attrs(gv_obj, self.parent.obj_type)
+
+ template_ref = self.node.get("template")
+ if template_ref:
+ e = [template_ref, self.obj_id]
+ e_id = gv_obj.new_edge(e)
+ self._set_edge_attrs(gv_obj, e_id, 'template:edge')
+
+ elif self.obj_type == "rsc_template":
+ n = reduce_primitive(self.node)
+ if n is None:
+ raise ValueError("Referenced template not found")
+ ra_class = n.get("class")
+ ra_type = n.get("type")
+ lbl_top = self._gv_rsc_id()
+ if ra_class in ("ocf", "stonith"):
+ lbl_bottom = ra_type
+ else:
+ lbl_bottom = "%s:%s" % (ra_class, ra_type)
+ gv_obj.new_node(self.obj_id, norank=(ra_class == "stonith"))
+ gv_obj.new_attr(self.obj_id, 'label', '%s\\n%s' % (lbl_top, lbl_bottom))
+ self._set_gv_attrs(gv_obj)
+ self._set_gv_attrs(gv_obj, "class:%s" % ra_class)
+ # if it's clone/ms, then get parent graph attributes
+ if self.parent and self.parent.obj_type in constants.clonems_tags:
+ self._set_gv_attrs(gv_obj, self.parent.obj_type)
+
+
+class CibContainer(CibObject):
+ '''
+ Groups and clones and ms.
+ '''
+ set_names = {
+ "instance_attributes": "params",
+ "meta_attributes": "meta",
+ }
+
+ def _repr_cli_head(self, format_mode):
+ children = []
+ for c in self.node.iterchildren():
+ if is_child_rsc(c) or \
+ (self.obj_type == "group" and is_primitive(c)):
+ children.append(clidisplay.rscref(c.get("id")))
+ s = clidisplay.keyword(self.obj_type)
+ ident = clidisplay.ident(self.obj_id)
+ return "%s %s %s" % (s, ident, ' '.join(children))
+
+ def check_sanity(self):
+ '''
+ Check meta attributes.
+ '''
+ if self.node is None: # eh?
+ logger.error("%s: no xml (strange)", self.obj_id)
+ return utils.get_check_rc()
+ l = constants.rsc_meta_attributes
+ if self.obj_type == "clone":
+ l += constants.clone_meta_attributes
+ elif self.obj_type == "ms":
+ l += constants.clone_meta_attributes + constants.ms_meta_attributes
+ elif self.obj_type == "group":
+ l += constants.group_meta_attributes
+ rc = sanity_check_meta(self.obj_id, self.node, l)
+ return rc
+
+ def repr_gv(self, gv_obj, from_grp=False):
+ '''
+ A group is a subgraph.
+ Clones and ms just get different attributes.
+ '''
+ if self.obj_type != "group":
+ return
+ sg_obj = gv_obj.group([x.obj_id for x in self.children],
+ "cluster_%s" % self.obj_id)
+ sg_obj.new_graph_attr('label', self._gv_rsc_id())
+ self._set_sg_attrs(sg_obj, self.obj_type)
+ if self.parent and self.parent.obj_type in constants.clonems_tags:
+ self._set_sg_attrs(sg_obj, self.parent.obj_type)
+ for child_rsc in self.children:
+ child_rsc.repr_gv(sg_obj, from_grp=True)
+
+
+class CibBundle(CibObject):
+ '''
+ bundle type resource
+ '''
+ set_names = {
+ "instance_attributes": "params",
+ "meta_attributes": "meta",
+ "docker": "docker",
+ "network": "network",
+ "storage": "storage",
+ "primitive": "primitive",
+ "meta": "meta"
+ }
+
+ def _repr_cli_head(self, format_mode):
+ s = clidisplay.keyword(self.obj_type)
+ ident = clidisplay.ident(self.obj_id)
+ return "%s %s" % (s, ident)
+
+ def _repr_cli_child(self, c, format_mode):
+ return self._attr_set_str(c)
+
+
+def _check_if_constraint_ref_is_child(obj):
+ """
+ Used by check_sanity for constraints to verify
+ that referenced resources are not children in
+ a container.
+ """
+ rc = 0
+ for rscid in obj.referenced_resources():
+ tgt = cib_factory.find_object(rscid)
+ if not tgt:
+ logger.warning("%s: resource %s does not exist", obj.obj_id, rscid)
+ rc = 1
+ elif tgt.parent and tgt.parent.obj_type == "group":
+ if obj.obj_type == "colocation":
+ logger.warning("%s: resource %s is grouped, constraints should apply to the group", obj.obj_id, rscid)
+ rc = 1
+ elif tgt.parent and tgt.parent.obj_type in constants.container_tags:
+ logger.warning("%s: resource %s ambiguous, apply constraints to container", obj.obj_id, rscid)
+ rc = 1
+ return rc
+
+
+class CibLocation(CibObject):
+ '''
+ Location constraint.
+ '''
+
+ def _repr_cli_head(self, format_mode):
+ rsc = None
+ if "rsc" in list(self.node.keys()):
+ rsc = self.node.get("rsc")
+ elif "rsc-pattern" in list(self.node.keys()):
+ rsc = '/%s/' % (self.node.get("rsc-pattern"))
+ if rsc is not None:
+ rsc = clidisplay.rscref(rsc)
+ elif self.node.find("resource_set") is not None:
+ rsc = '{ %s }' % (' '.join(rsc_set_constraint(self.node, self.obj_type)))
+ else:
+ logger.error("%s: unknown rsc_location format", self.obj_id)
+ return None
+ s = clidisplay.keyword(self.obj_type)
+ ident = clidisplay.ident(self.obj_id)
+ s = "%s %s %s" % (s, ident, rsc)
+
+ known_attrs = ['role', 'resource-discovery']
+ for attr in known_attrs:
+ val = self.node.get(attr)
+ if val is not None:
+ s += " %s=%s" % (attr, val)
+
+ pref_node = self.node.get("node")
+ score = clidisplay.score(get_score(self.node))
+ if pref_node is not None:
+ s = "%s %s: %s" % (s, score, pref_node)
+ return s
+
+ def _repr_cli_child(self, c, format_mode):
+ if c.tag == "rule":
+ return "%s %s" % \
+ (clidisplay.keyword("rule"), cli_rule(c))
+
+ def check_sanity(self):
+ '''
+ Check if node references match existing nodes.
+ '''
+ if self.node is None: # eh?
+ logger.error("%s: no xml (strange)", self.obj_id)
+ return utils.get_check_rc()
+ rc = 0
+ uname = self.node.get("node")
+ if uname and uname.lower() not in [ident.lower() for ident in cib_factory.node_id_list()]:
+ logger.warning("%s: referenced node %s does not exist", self.obj_id, uname)
+ rc = 1
+ pattern = self.node.get("rsc-pattern")
+ if pattern:
+ try:
+ re.compile(pattern)
+ except (IndexError, re.error) as e:
+ logger.warning("%s: '%s' may not be a valid regular expression (%s)", self.obj_id, pattern, e)
+ rc = 1
+ for enode in self.node.xpath("rule/expression"):
+ if enode.get("attribute") == "#uname":
+ uname = enode.get("value")
+ ids = [i.lower() for i in cib_factory.node_id_list()]
+ if uname and uname.lower() not in ids:
+ logger.warning("%s: referenced node %s does not exist", self.obj_id, uname)
+ rc = 1
+ rc2 = _check_if_constraint_ref_is_child(self)
+ if rc2 > rc:
+ rc = rc2
+ return rc
+
+ def referenced_resources(self):
+ ret = self.node.xpath('.//resource_set/resource_ref/@id')
+ return ret or [self.node.get("rsc")]
+
+ def repr_gv(self, gv_obj, from_grp=False):
+ '''
+ Render the location constraint as an edge from the preferred
+ node to the resource.
+ '''
+ pref_node = self.node.get("node")
+ if pref_node is not None:
+ score_n = self.node
+ # otherwise, it's too complex to render
+ elif is_pref_location(self.node):
+ score_n = self.node.findall("rule")[0]
+ exp = self.node.xpath("rule/expression")[0]
+ pref_node = exp.get("value")
+ if pref_node is None:
+ return
+ rsc_id = gv_first_rsc(self.node.get("rsc"))
+ if rsc_id is not None:
+ e = [pref_node, rsc_id]
+ e_id = gv_obj.new_edge(e)
+ self._set_edge_attrs(gv_obj, e_id)
+ gv_edge_score_label(gv_obj, e_id, score_n)
+
+
+def _opt_set_name(n):
+ return "cluster%s" % n.get("id")
+
+
+def rsc_set_gv_edges(node, gv_obj):
+ def traverse_set(cum, st):
+ e = []
+ for i, elem in enumerate(cum):
+ if isinstance(elem, list):
+ for rsc in elem:
+ cum2 = copy.copy(cum)
+ cum2[i] = rsc
+ traverse_set(cum2, st)
+ return
+ else:
+ e.append(elem)
+ st.append(e)
+
+ cum = []
+ for n in node.iterchildren("resource_set"):
+ sequential = get_boolean(n.get("sequential"), True)
+ require_all = get_boolean(n.get("require-all"), True)
+ l = get_rsc_ref_ids(n)
+ if not require_all and len(l) > 1:
+ sg_name = _opt_set_name(n)
+ cum.append('[%s]%s' % (sg_name, l[0]))
+ elif not sequential and len(l) > 1:
+ cum.append(l)
+ else:
+ cum += l
+ st = []
+ # deliver only 2-edges
+ for i, lvl in enumerate(cum):
+ if i == len(cum)-1:
+ break
+ traverse_set([cum[i], cum[i+1]], st)
+ return st
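+
+# Worked example (illustrative, assumed resource names): for an order
+# constraint over the sets A B (C D with sequential=false), cum becomes
+# ['A', 'B', ['C', 'D']] and the adjacent pairs expand into the 2-edges
+# [A, B], [B, C] and [B, D].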
+
+
+class CibSimpleConstraint(CibObject):
+ '''
+ Colocation and order constraints.
+ '''
+
+ def _repr_cli_head(self, format_mode):
+ s = clidisplay.keyword(self.obj_type)
+ ident = clidisplay.ident(self.obj_id)
+ score = get_score(self.node) or get_kind(self.node)
+ if self.node.find("resource_set") is not None:
+ col = rsc_set_constraint(self.node, self.obj_type)
+ else:
+ col = simple_rsc_constraint(self.node, self.obj_type)
+ if not col:
+ return None
+ if self.obj_type == "order":
+ symm = self.node.get("symmetrical")
+ if symm:
+ col.append("symmetrical=%s" % symm)
+ elif self.obj_type == "colocation":
+ node_attr = self.node.get("node-attribute")
+ if node_attr:
+ col.append("node-attribute=%s" % node_attr)
+ s = "%s %s " % (s, ident)
+ if score != '':
+ s += "%s: " % (clidisplay.score(score))
+ return s + ' '.join(col)
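+
+ # Illustrative outputs (assumed ids): "colocation c1 inf: vip web"
+ # or "order o1 Mandatory: vip web symmetrical=false", depending on
+ # whether a score or a kind is set on the constraint.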
+
+ def _mk_optional_set(self, gv_obj, n):
+ '''
+ Put optional resource set in a box.
+ '''
+ members = get_rsc_ref_ids(n)
+ sg_name = _opt_set_name(n)
+ sg_obj = gv_obj.optional_set(members, sg_name)
+ self._set_sg_attrs(sg_obj, "optional_set")
+
+ def _mk_one_edge(self, gv_obj, e):
+ '''
+ Create an edge between two resources (used for resource
+ sets). If the first resource name starts with '[', it's
+ an optional resource set which is later put into a subgraph.
+ The edge then goes from the subgraph to the resource
+ which follows. An expensive exception.
+ '''
+ optional_rsc = False
+ r = re.match(r'\[(.*)\]', e[0])
+ if r:
+ optional_rsc = True
+ sg_name = r.group(1)
+ e = [re.sub(r'\[(.*)\]', '', x) for x in e]
+ e = [gv_last_rsc(e[0]), gv_first_rsc(e[1])]
+ e_id = gv_obj.new_edge(e)
+ gv_edge_score_label(gv_obj, e_id, self.node)
+ if optional_rsc:
+ self._set_edge_attrs(gv_obj, e_id, 'optional_set')
+ gv_obj.new_edge_attr(e_id, 'ltail', gv_obj.gv_id(sg_name))
+
+ def repr_gv(self, gv_obj, from_grp=False):
+ '''
+ Render order constraints as graph edges; colocation
+ constraints are not rendered.
+ '''
+ if self.obj_type != "order":
+ return
+ if self.node.find("resource_set") is not None:
+ for e in rsc_set_gv_edges(self.node, gv_obj):
+ self._mk_one_edge(gv_obj, e)
+ for n in self.node.iterchildren("resource_set"):
+ if not get_boolean(n.get("require-all"), True):
+ self._mk_optional_set(gv_obj, n)
+ else:
+ self._mk_one_edge(gv_obj, [
+ self.node.get("first"),
+ self.node.get("then")])
+
+ def referenced_resources(self):
+ ret = self.node.xpath('.//resource_set/resource_ref/@id')
+ if ret:
+ return ret
+ if self.obj_type == "order":
+ return [self.node.get("first"), self.node.get("then")]
+ elif self.obj_type == "colocation":
+ return [self.node.get("rsc"), self.node.get("with-rsc")]
+ elif self.node.get("rsc"):
+ return [self.node.get("rsc")]
+
+ def check_sanity(self):
+ if self.node is None:
+ logger.error("%s: no xml (strange)", self.obj_id)
+ return utils.get_check_rc()
+ return _check_if_constraint_ref_is_child(self)
+
+
+class CibRscTicket(CibSimpleConstraint):
+ '''
+ rsc_ticket constraint.
+ '''
+
+ def _repr_cli_head(self, format_mode):
+ s = clidisplay.keyword(self.obj_type)
+ ident = clidisplay.ident(self.obj_id)
+ ticket = clidisplay.ticket(self.node.get("ticket"))
+ if self.node.find("resource_set") is not None:
+ col = rsc_set_constraint(self.node, self.obj_type)
+ else:
+ col = simple_rsc_constraint(self.node, self.obj_type)
+ if not col:
+ return None
+ a = self.node.get("loss-policy")
+ if a:
+ col.append("loss-policy=%s" % a)
+ return "%s %s %s: %s" % (s, ident, ticket, ' '.join(col))
+
+
+class CibProperty(CibObject):
+ '''
+ Cluster properties.
+ '''
+
+ def _repr_cli_head(self, format_mode):
+ return "%s %s" % (clidisplay.keyword(self.obj_type),
+ head_id_format(self.obj_id))
+
+ def _repr_cli_child(self, c, format_mode):
+ if c.tag == "rule":
+ return ' '.join((clidisplay.keyword("rule"),
+ cli_rule(c)))
+ elif c.tag == "nvpair":
+ return cli_nvpair(c)
+ else:
+ return ''
+
+ def check_sanity(self):
+ '''
+ Match properties with PE metadata.
+ '''
+ if self.node is None: # eh?
+ logger.error("%s: no xml (strange)", self.obj_id)
+ return utils.get_check_rc()
+ l = []
+ if self.obj_type == "property":
+ # don't check property sets which are not
+ # "cib-bootstrap-options", they are probably used by
+ # some resource agents such as mysql to store RA
+ # specific state
+ if self.obj_id != cib_object_map[self.xml_obj_type][3]:
+ return 0
+ l = get_properties_list()
+ l += constants.extra_cluster_properties
+ elif self.obj_type == "op_defaults":
+ l = schema.get('attr', 'op', 'a')
+ elif self.obj_type == "rsc_defaults":
+ l = constants.rsc_meta_attributes
+ rc = sanity_check_nvpairs(self.obj_id, self.node, l)
+ return rc
+
+
+def is_stonith_rsc(xmlnode):
+ '''
+ True if resource is stonith or derived from stonith template.
+ '''
+ xmlnode = reduce_primitive(xmlnode)
+ if xmlnode is None:
+ return False
+ return xmlnode.get('class') == 'stonith'
+
+
+class CibFencingOrder(CibObject):
+ '''
+ Fencing order (fencing-topology).
+ '''
+
+ def set_id(self, obj_id=None):
+ self.obj_id = "fencing_topology"
+
+ def set_nodeid(self):
+ '''This id is not part of attributes'''
+ pass
+
+ def __str__(self):
+ return self.obj_id
+
+ def can_be_renamed(self):
+ ''' Cannot rename this one. '''
+ return False
+
+ def _repr_cli_head(self, format_mode):
+ s = clidisplay.keyword(self.obj_type)
+ d = ordereddict.odict()
+ for c in self.node.iterchildren("fencing-level"):
+ if "target-pattern" in c.attrib:
+ target = (None, c.get("target-pattern"))
+ elif "target-attribute" in c.attrib:
+ target = (c.get("target-attribute"), c.get("target-value"))
+ else:
+ target = c.get("target")
+ if target not in d:
+ d[target] = {}
+ d[target][c.get("index")] = c.get("devices")
+ dd = ordereddict.odict()
+ for target in list(d.keys()):
+ sorted_keys = sorted([int(i) for i in list(d[target].keys())])
+ dd[target] = [d[target][str(x)] for x in sorted_keys]
+ d2 = {}
+ for target in list(dd.keys()):
+ devs_s = ' '.join(dd[target])
+ d2[devs_s] = 1
+ if len(d2) == 1 and len(d) == len(cib_factory.node_id_list()):
+ return "%s %s" % (s, devs_s)
+
+ def fmt_target(tgt):
+ if isinstance(tgt, tuple):
+ if tgt[0] is None:
+ return "pattern:%s" % (tgt[1])
+ return "attr:%s=%s" % tgt
+ return tgt + ":"
+ return cli_format([s] + ["%s %s" % (fmt_target(x), ' '.join(dd[x]))
+ for x in list(dd.keys())],
+ break_lines=(format_mode > 0))
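+
+ # Illustrative mapping (assumed names): the fencing-level entries
+ # <fencing-level target="node1" index="1" devices="st1"/>
+ # <fencing-level target="node1" index="2" devices="st2"/>
+ # render as "fencing_topology node1: st1 st2".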
+
+ def _repr_cli_child(self, c, format_mode):
+ pass # no children here
+
+ def check_sanity(self):
+ '''
+ Targets are nodes and resource are stonith resources.
+ '''
+ if self.node is None: # eh?
+ logger.error("%s: no xml (strange)", self.obj_id)
+ return utils.get_check_rc()
+ rc = 0
+ nl = self.node.findall("fencing-level")
+ for target in [x.get("target") for x in nl if x.get("target") is not None]:
+ if target.lower() not in [ident.lower() for ident in cib_factory.node_id_list()]:
+ logger.warning("%s: target %s not a node", self.obj_id, target)
+ rc = 1
+ stonith_rsc_l = [x.obj_id for x in
+ cib_factory.get_elems_on_type("type:primitive")
+ if is_stonith_rsc(x.node)]
+ for devices in [x.get("devices") for x in nl]:
+ for dev in devices.split(","):
+ if not cib_factory.find_object(dev):
+ logger.warning("%s: resource %s does not exist", self.obj_id, dev)
+ rc = 1
+ elif dev not in stonith_rsc_l:
+ logger.warning("%s: %s not a stonith resource", self.obj_id, dev)
+ rc = 1
+ return rc
+
+
+class CibAcl(CibObject):
+ '''
+ User and role ACL.
+
+ Now with support for 1.1.12 style ACL rules.
+
+ '''
+
+ def _repr_cli_head(self, format_mode):
+ s = clidisplay.keyword(self.obj_type)
+ ident = clidisplay.ident(self.obj_id)
+ return "%s %s" % (s, ident)
+
+ def _repr_cli_child(self, c, format_mode):
+ if c.tag in constants.acl_rule_names:
+ return cli_acl_rule(c, format_mode)
+ elif c.tag == "role_ref":
+ return cli_acl_roleref(c, format_mode)
+ elif c.tag == "role":
+ return cli_acl_role(c)
+ elif c.tag == "acl_permission":
+ return cli_acl_permission(c)
+
+
+class CibTag(CibObject):
+ '''
+ Tag objects
+
+ TODO: check_sanity, repr_gv
+
+ '''
+
+ def _repr_cli_head(self, fmt):
+ return ' '.join([clidisplay.keyword(self.obj_type),
+ clidisplay.ident(self.obj_id)] +
+ [clidisplay.rscref(c.get('id'))
+ for c in self.node.iterchildren() if not is_comment(c)])
+
+
+class CibAlert(CibObject):
+ '''
+ Alert objects
+
+ TODO: check_sanity, repr_gv
+
+ FIXME: display instance / meta attributes, description
+
+ '''
+ set_names = {
+ "instance_attributes": "attributes",
+ "meta_attributes": "meta",
+ }
+
+ def _repr_cli_head(self, fmt):
+ ret = [clidisplay.keyword(self.obj_type),
+ clidisplay.ident(self.obj_id),
+ cli_path(self.node.get('path'))]
+ return ' '.join(ret)
+
+ def _repr_cli_child(self, c, format_mode):
+ if c.tag in self.set_names:
+ return self._attr_set_str(c)
+ elif c.tag == "select":
+ r = ["select"]
+ for sel in c.iterchildren():
+ if not sel.tag.startswith('select_'):
+ continue
+ # slice off the prefix; lstrip() would strip a character set
+ r.append(sel.tag[len('select_'):])
+ if sel.tag == 'select_attributes':
+ r.append('{')
+ r.extend(sel.xpath('attribute/@name'))
+ r.append('}')
+ return ' '.join(r)
+ elif c.tag == "recipient":
+ r = ["to"]
+ is_complex = self._is_complex()
+ if is_complex:
+ r.append('{')
+ r.append(cli_path(c.get('value')))
+ for subset in c.xpath('instance_attributes|meta_attributes'):
+ r.append(self._attr_set_str(subset))
+ if is_complex:
+ r.append('}')
+ return ' '.join(r)
+
+ def _is_complex(self):
+ '''
+ True if this alert is ambiguous wrt meta attributes in recipient tags
+ '''
+ children = [c.tag for c in self.node.xpath('recipient|instance_attributes|meta_attributes')]
+ # list.index() raises ValueError rather than returning -1
+ if 'recipient' not in children:
+ return False
+ ri = children.index('recipient')
+ children = children[ri+1:]
+ return 'instance_attributes' in children or 'meta_attributes' in children
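+
+ # Illustrative ambiguity: when attributes or meta attributes follow
+ # a recipient, a trailing "meta ..." could belong to either the
+ # recipient or the alert itself, so such alerts render their
+ # recipients inside braces.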
+
+
+#
+################################################################
+
+
+#
+# cib factory
+#
+cib_piped = "cibadmin -p"
+
+
+def get_default_timeout():
+ t = cib_factory.get_op_default("timeout")
+ if t is not None:
+ return t
+
+
+# xml -> cli translations (and classes)
+cib_object_map = {
+ # xml_tag: ( cli_name, element class, parent element tag, id hint )
+ "node": ("node", CibNode, "nodes"),
+ "op": ("op", CibOp, "operations"),
+ "primitive": ("primitive", CibPrimitive, "resources"),
+ "group": ("group", CibContainer, "resources"),
+ "clone": ("clone", CibContainer, "resources"),
+ "master": ("ms", CibContainer, "resources"),
+ "template": ("rsc_template", CibPrimitive, "resources"),
+ "bundle": ("bundle", CibBundle, "resources"),
+ "rsc_location": ("location", CibLocation, "constraints"),
+ "rsc_colocation": ("colocation", CibSimpleConstraint, "constraints"),
+ "rsc_order": ("order", CibSimpleConstraint, "constraints"),
+ "rsc_ticket": ("rsc_ticket", CibRscTicket, "constraints"),
+ "cluster_property_set": ("property", CibProperty, "crm_config", "cib-bootstrap-options"),
+ "rsc_defaults": ("rsc_defaults", CibProperty, "rsc_defaults", "rsc-options"),
+ "op_defaults": ("op_defaults", CibProperty, "op_defaults", "op-options"),
+ "fencing-topology": ("fencing_topology", CibFencingOrder, "configuration"),
+ "acl_role": ("role", CibAcl, "acls"),
+ "acl_user": ("user", CibAcl, "acls"),
+ "acl_target": ("acl_target", CibAcl, "acls"),
+ "acl_group": ("acl_group", CibAcl, "acls"),
+ "tag": ("tag", CibTag, "tags"),
+ "alert": ("alert", CibAlert, "alerts"),
+}
+
+
+# generate a translation cli -> tag
+backtrans = ordereddict.odict((item[0], key) for key, item in cib_object_map.items())
+
+
+def default_id_for_tag(tag):
+ "Get default id for XML tag"
+ m = cib_object_map.get(tag, tuple())
+ return m[3] if len(m) > 3 else None
+
+
+def default_id_for_obj(obj_type):
+ "Get default id for object type"
+ return default_id_for_tag(backtrans.get(obj_type))
+
+
+def can_migrate(node):
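+ # an xpath ending in /@value returns the attribute strings directly,
+ # so this checks whether any allow-migrate nvpair is set to "true"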
+ return 'true' in node.xpath('.//nvpair[@name="allow-migrate"]/@value')
+
+
+class CibDiff(object):
+ '''
+ Represents a set of pending CIB edits.
+ Complicated by the fact that nodes and resources
+ can have colliding ids.
+
+ Can carry changes either as CLI objects
+ or as XML statements.
+ '''
+ def __init__(self, objset):
+ self.objset = objset
+ self._node_set = orderedset.oset()
+ self._nodes = {}
+ self._rsc_set = orderedset.oset()
+ self._resources = {}
+
+ def add(self, item):
+ obj_id = id_for_node(item)
+ is_node = item.tag == 'node'
+ if obj_id is None:
+ logger.error("element %s has no id!", xml_tostring(item, pretty_print=True))
+ return False
+ elif is_node and obj_id in self._node_set:
+ logger.error("Duplicate node: %s", obj_id)
+ return False
+ elif not is_node and obj_id in self._rsc_set:
+ logger.error("Duplicate resource: %s", obj_id)
+ return False
+ elif is_node:
+ self._node_set.add(obj_id)
+ self._nodes[obj_id] = item
+ else:
+ self._rsc_set.add(obj_id)
+ self._resources[obj_id] = item
+ return True
+
+ def _obj_type(self, nid):
+ for obj in self.objset.all_set:
+ if obj.obj_id == nid:
+ return obj.obj_type
+ return None
+
+ def _is_node(self, nid):
+ for obj in self.objset.all_set:
+ if obj.obj_id == nid and obj.obj_type == 'node':
+ return True
+ return False
+
+ def _is_resource(self, nid):
+ for obj in self.objset.all_set:
+ if obj.obj_id == nid and obj.obj_type != 'node':
+ return True
+ return False
+
+ def _obj_nodes(self):
+ return orderedset.oset([n for n in self.objset.obj_ids
+ if self._is_node(n)])
+
+ def _obj_resources(self):
+ return orderedset.oset([n for n in self.objset.obj_ids
+ if self._is_resource(n)])
+
+ def _is_edit_valid(self, id_set, existing):
+ '''
+ 1. Cannot name any elements as those which exist but
+ were not picked for editing.
+ 2. Cannot remove running resources.
+ '''
+ rc = True
+ not_allowed = id_set & self.objset.locked_ids
+ rscstat = RscState()
+ if not_allowed:
+ logger.error("Elements %s already exist", ', '.join(list(not_allowed)))
+ rc = False
+ delete_set = existing - id_set
+ cannot_delete = [x for x in delete_set
+ if not rscstat.can_delete(x)]
+ if cannot_delete:
+ logger.error("Cannot delete running resources: %s", ', '.join(cannot_delete))
+ rc = False
+ return rc
+
+ def apply(self, factory, mode='cli', remove=True, method='replace'):
+ rc = True
+
+ edited_nodes = self._nodes.copy()
+ edited_resources = self._resources.copy()
+
+ def calc_sets(input_set, existing):
+ rc = True
+ if remove:
+ rc = self._is_edit_valid(input_set, existing)
+ del_set = existing - input_set
+ else:
+ del_set = orderedset.oset()
+ mk_set = input_set - existing
+ upd_set = input_set & existing
+ return rc, mk_set, upd_set, del_set
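+
+ # Set algebra sketch (illustrative): with existing = {a, b} and
+ # input_set = {b, c}, calc_sets yields mk = {c}, upd = {b} and,
+ # when remove is True, del = {a}.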
+
+ for e, s, existing in ((edited_nodes, self._node_set, self._obj_nodes()),
+ (edited_resources, self._rsc_set, self._obj_resources())):
+ rc, mk, upd, rm = calc_sets(s, existing)
+ if not rc:
+ return rc
+ rc = cib_factory.set_update(e, mk, upd, rm, upd_type=mode, method=method)
+ if not rc:
+ return rc
+ return rc
+
+
+class CibFactory(object):
+ '''
+ Juggle with CIB objects.
+ See check_structure below for details on the internal cib
+ representation.
+ '''
+
+ def __init__(self):
+ self._init_vars()
+ self.regtest = options.regression_tests
+ self.last_commit_time = 0
+ # internal (just not to produce silly messages)
+ self._no_constraint_rm_msg = False
+ self._crm_diff_cmd = "crm_diff --no-version"
+
+ def is_cib_sane(self):
+ # try to initialize
+ if self.cib_elem is None:
+ self.initialize()
+ if self.cib_elem is None:
+ logger_utils.empty_cib_err()
+ return False
+ return True
+
+ def get_cib(self):
+ if not self.is_cib_sane():
+ return None
+ return self.cib_elem
+ #
+ # check internal structures
+ #
+
+ def _check_parent(self, obj, parent):
+ if obj not in parent.children:
+ logger.error("object %s does not reference its child %s", parent.obj_id, obj.obj_id)
+ return False
+ if parent.node != obj.node.getparent():
+ if obj.node.getparent() is None:
+ logger.error("object %s node is not a child of its parent %s", obj.obj_id, parent.obj_id)
+ else:
+ logger.error("object %s node is not a child of its parent %s, but %s:%s",
+ obj.obj_id, parent.obj_id, obj.node.getparent().tag, obj.node.getparent().get("id"))
+ return False
+ return True
+
+ def check_structure(self):
+ if not self.is_cib_sane():
+ return False
+ rc = True
+ for obj in self.cib_objects:
+ if obj.parent:
+ if not self._check_parent(obj, obj.parent):
+ logger.debug("check_parent failed: %s %s", obj.obj_id, obj.parent)
+ rc = False
+ for child in obj.children:
+ if not child.parent:
+ logger.error("child %s does not reference its parent %s", child.obj_id, obj.obj_id)
+ rc = False
+ return rc
+
+ def regression_testing(self, param):
+ # provide some help for regression testing
+ # in particular by trying to provide output which is
+ # easier to predict
+ if param == "off":
+ self.regtest = False
+ elif param == "on":
+ self.regtest = True
+ else:
+ logger.warning("bad parameter for regtest: %s", param)
+
+ def get_schema(self):
+ return self.cib_attrs["validate-with"]
+
+ def change_schema(self, schema_st):
+ 'Use another schema'
+ if schema_st == self.get_schema():
+ logger.info("already using schema %s", schema_st)
+ return True
+ if not schema.is_supported(schema_st):
+ logger.warning("schema %s is not supported by the shell", schema_st)
+ self.cib_elem.set("validate-with", schema_st)
+ if not schema.test_schema(self.cib_elem):
+ self.cib_elem.set("validate-with", self.get_schema())
+ logger.error("schema %s does not exist", schema_st)
+ return False
+ schema.init_schema(self.cib_elem)
+ rc = True
+ for obj in self.cib_objects:
+ if schema.get('sub', obj.node.tag, 'a') is None:
+ logger.error("Element '%s' is not supported by the RNG schema %s", obj.node.tag, schema_st)
+ logger.debug("Offending object: %s", xml_tostring(obj.node))
+ rc = False
+ if not rc:
+ # revert, as some elements won't validate
+ self.cib_elem.set("validate-with", self.get_schema())
+ schema.init_schema(self.cib_elem)
+ logger.error("Schema %s conflicts with current configuration", schema_st)
+ return 4
+ self.cib_attrs["validate-with"] = schema_st
+ self.new_schema = True
+ return 0
+
+ def is_elem_supported(self, obj_type):
+ 'Do we support this element?'
+ try:
+ if schema.get('sub', backtrans[obj_type], 'a') is None:
+ return False
+ except KeyError:
+ pass
+ return True
+
+ def is_cib_supported(self):
+ 'Do we support this CIB?'
+ req = self.cib_elem.get("crm_feature_set")
+ validator = self.cib_elem.get("validate-with")
+ # if no schema is configured, just assume that it validates
+ if not validator or schema.is_supported(validator):
+ return True
+ logger_utils.cib_ver_unsupported_err(validator, req)
+ return False
+
+ def upgrade_validate_with(self, force=False):
+ """Upgrade the CIB.
+
+ Requires the force argument to be set if
+ validate-with is configured to anything other than
+ 0.6.
+ """
+ if not self.is_cib_sane():
+ return False
+ validator = self.cib_elem.get("validate-with")
+ if force or not validator or re.match("0[.]6", validator):
+ return ext_cmd("cibadmin --upgrade --force") == 0
+
+ def _import_cib(self, cib_elem):
+ 'Parse the current CIB (from cibadmin -Q).'
+ self.cib_elem = cib_elem
+ if self.cib_elem is None:
+ return False
+ if not self.is_cib_supported():
+ logger.warning("CIB schema is not supported by the shell")
+ self._get_cib_attributes(self.cib_elem)
+ schema.init_schema(self.cib_elem)
+ return True
+
+ def _get_cib_attributes(self, cib):
+ for attr in list(cib.keys()):
+ self.cib_attrs[attr] = cib.get(attr)
+
+ def _set_cib_attributes(self, cib):
+ for attr in self.cib_attrs:
+ cib.set(attr, self.cib_attrs[attr])
+
+ def _copy_cib_attributes(self, src_cib, cib):
+ """
+ Copy CIB attributes from src_cib to cib.
+ Also updates self.cib_attrs.
+ Preserves attributes that may be modified by
+ the user (for example validate-with).
+ """
+ attrs = ((attr, src_cib.get(attr))
+ for attr in self.cib_attrs
+ if attr not in constants.cib_user_attrs)
+ for attr, value in attrs:
+ self.cib_attrs[attr] = value
+ cib.set(attr, value)
+
+ def obj_set2cib(self, obj_set, obj_filter=None):
+ '''
+ Return document containing objects in obj_set.
+ Must remove all children from the object list, because
+ printing xml of parents will include them.
+ Optional filter to sieve objects.
+ '''
+ cib_elem = new_cib()
+ # get only top parents for the objects in the list
+ # e.g. if we get a primitive which is part of a clone,
+ # then the clone gets in, not the primitive
+ # dict will weed out duplicates
+ d = {}
+ for obj in obj_set:
+ if obj_filter and not obj_filter(obj):
+ continue
+ d[obj.top_parent()] = 1
+ for obj in d:
+ get_topnode(cib_elem, obj.parent_type).append(copy.deepcopy(obj.node))
+ self._set_cib_attributes(cib_elem)
+ return cib_elem
+
+ #
+ # commit changed objects to the CIB
+ #
+ def _attr_match(self, c, a):
+ 'Does attribute match?'
+ return c.get(a) == self.cib_attrs.get(a)
+
+ def is_current_cib_equal(self, silent=False):
+ cib_elem = read_cib(cibdump2elem)
+ if cib_elem is None:
+ return False
+ rc = self._attr_match(cib_elem, 'epoch') and \
+ self._attr_match(cib_elem, 'admin_epoch')
+ if not silent and not rc:
+ logger.warning("CIB changed in the meantime: won't touch it!")
+ return rc
+
+ def _state_header(self):
+ 'Print object status header'
+ print(CibObject.state_fmt %
+ ("", "origin", "updated", "parent", "children"))
+
+ def showobjects(self):
+ self._state_header()
+ for obj in self.cib_objects:
+ obj.dump_state()
+ if self.remove_queue:
+ print("Remove queue:")
+ for obj in self.remove_queue:
+ obj.dump_state()
+
+ def commit(self, force=False, replace=False):
+ 'Commit the configuration to the CIB.'
+ if not self.is_cib_sane():
+ return False
+ if not replace:
+ rc = self._patch_cib(force)
+ else:
+ rc = self._replace_cib(force)
+ if rc:
+ # reload the cib!
+ t = time.time()
+ logger.debug("CIB commit successful at %s", t)
+ if is_live_cib():
+ self.last_commit_time = t
+ self.refresh()
+
+ utils.check_no_quorum_policy_with_dlm()
+ return rc
+
+ def _update_schema(self):
+ '''
+ Set the validate-with, if the schema changed.
+ '''
+ s = '<cib validate-with="%s"/>' % self.cib_attrs["validate-with"]
+ rc = pipe_string("%s -U" % cib_piped, s)
+ if rc != 0:
+ logger_utils.update_err("cib", "-U", s, rc)
+ return False
+ self.new_schema = False
+ return True
+
+ def _replace_cib(self, force):
+ try:
+ conf_el = self.cib_elem.findall("configuration")[0]
+ except IndexError:
+ logger.error("cannot find the configuration element")
+ return False
+ if self.new_schema and not self._update_schema():
+ return False
+ cibadmin_opts = "-R --force" if force else "-R"
+ rc = pipe_string("%s %s" % (cib_piped, cibadmin_opts), etree.tostring(conf_el))
+ if rc != 0:
+ logger_utils.update_err("cib", cibadmin_opts, xml_tostring(conf_el), rc)
+ return False
+ return True
+
+ def _patch_cib(self, force):
+ # copy the epoch from the current cib to both the target
+ # cib and the original one (otherwise cibadmin won't want
+ # to apply the patch)
+ current_cib = read_cib(cibdump2elem)
+ if current_cib is None:
+ return False
+
+ self._copy_cib_attributes(current_cib, self.cib_orig)
+ current_cib = None # don't need that anymore
+ self._set_cib_attributes(self.cib_elem)
+ cib_s = xml_tostring(self.cib_orig, pretty_print=True)
+ tmpf = str2tmp(cib_s, suffix=".xml")
+ if not tmpf or not ensure_sudo_readable(tmpf):
+ return False
+ tmpfiles.add(tmpf)
+ cibadmin_opts = "-P --force" if force else "-P"
+
+ # produce a diff:
+ # dump_new_conf | crm_diff -o self.cib_orig -n -
+
+ logger.debug("Basis: %s", open(tmpf).read())
+ logger.debug("Input: %s", xml_tostring(self.cib_elem))
+ rc, cib_diff = filter_string("%s -o %s -n -" %
+ (self._crm_diff_cmd, tmpf),
+ etree.tostring(self.cib_elem))
+ if not cib_diff and (rc == 0):
+ # no diff = no action
+ return True
+ elif not cib_diff:
+ logger.error("crm_diff apparently failed to produce the diff (rc=%d)", rc)
+ return False
+
+ # for v1 diffs, fall back to non-patching if
+ # any containers are modified, else strip the digest
+ if "<diff" in cib_diff and "digest=" in cib_diff:
+ if not self.can_patch_v1():
+ return self._replace_cib(force)
+ e = etree.fromstring(cib_diff)
+ for tag in e.xpath("/diff"):
+ if "digest" in tag.attrib:
+ del tag.attrib["digest"]
+ cib_diff = xml_tostring(e)
+ logger.debug("Diff: %s", cib_diff)
+ rc = pipe_string("%s %s" % (cib_piped, cibadmin_opts),
+ cib_diff.encode('utf-8'))
+ if rc != 0:
+ logger_utils.update_err("cib", cibadmin_opts, cib_diff, rc)
+ return False
+ return True
+
+ def can_patch_v1(self):
+ """
+ The v1 patch format cannot handle reordering,
+ so if there are any changes to any containers
+ or acl tags, don't patch.
+ """
+ def group_changed():
+ for obj in self.cib_objects:
+ if not obj.updated:
+ continue
+ if obj.obj_type in constants.container_tags:
+ return True
+ if obj.obj_type in ('user', 'role', 'acl_target', 'acl_group'):
+ return True
+ return False
+ return not group_changed()
+
+ #
+ # initialize cib_objects from CIB
+ #
+ def _create_object_from_cib(self, node, pnode=None):
+ '''
+ Need pnode (parent node) acrobatics because cluster
+ properties and rsc/op_defaults hold stuff in a
+ meta_attributes child.
+ '''
+ assert node is not None
+ if pnode is None:
+ pnode = node
+ obj = cib_object_map[pnode.tag][1](pnode.tag)
+ obj.origin = "cib"
+ obj.node = node
+ obj.set_id()
+ self.cib_objects.append(obj)
+ return obj
+
+ def _populate(self):
+ "Walk the cib and collect cib objects."
+ all_nodes = get_interesting_nodes(self.cib_elem, [])
+ if not all_nodes:
+ return
+ for node in processing_sort(all_nodes):
+ if is_defaults(node):
+ for c in node.xpath("./meta_attributes"):
+ self._create_object_from_cib(c, node)
+ else:
+ self._create_object_from_cib(node)
+ for obj in self.cib_objects:
+ obj.move_comments()
+ fix_comments(obj.node)
+ self.cli_use_validate_all()
+ for obj in self.cib_objects:
+ self._update_links(obj)
+
+ def cli_use_validate_all(self):
+ for obj in self.cib_objects:
+ if not obj.cli_use_validate():
+ obj.nocli = True
+ obj.nocli_warn = False
+ # no need to warn, user can see the object displayed as XML
+ logger.debug("object %s cannot be represented in the CLI notation", obj.obj_id)
+
+ def initialize(self, cib=None):
+ if self.cib_elem is not None:
+ return True
+ if cib is None:
+ cib = read_cib(cibdump2elem)
+ elif isinstance(cib, str):
+ cib = text2elem(cib)
+ if not self._import_cib(cib):
+ return False
+ self.cib_orig = copy.deepcopy(self.cib_elem)
+ sanitize_cib_for_patching(self.cib_orig)
+ sanitize_cib(self.cib_elem)
+ show_unrecognized_elems(self.cib_elem)
+ self._populate()
+ return self.check_structure()
+
+ def _init_vars(self):
+ self.cib_elem = None # the cib
+ self.cib_orig = None # the CIB which we loaded
+ self.cib_attrs = {} # cib version dictionary
+ self.cib_objects = [] # a list of cib objects
+ self.remove_queue = [] # a list of cib objects to be removed
+ self.id_refs = {} # dict of id-refs
+ self.new_schema = False # schema changed
+ self._state = []
+
+ def _push_state(self):
+ '''
+ A rudimentary instance state backup. Just make copies of
+ all important variables.
+ idmgmt has to be backed up too.
+ '''
+ self._state.append([copy.deepcopy(x)
+ for x in (self.cib_elem,
+ self.cib_attrs,
+ self.cib_objects,
+ self.remove_queue,
+ self.id_refs)])
+ idmgmt.push_state()
+
+ def _pop_state(self):
+ try:
+ logger.debug("performing rollback from %s", self.cib_objects)
+ self.cib_elem, \
+ self.cib_attrs, self.cib_objects, \
+ self.remove_queue, self.id_refs = self._state.pop()
+ except IndexError:
+ return False
+ # need to get addresses of all new objects created by
+ # deepcopy
+ for obj in self.cib_objects:
+ obj.node = self.find_xml_node(obj.xml_obj_type, obj.obj_id)
+ self._update_links(obj)
+ idmgmt.pop_state()
+ return self.check_structure()
+
+ def _drop_state(self):
+ try:
+ self._state.pop()
+ except IndexError:
+ pass
+ idmgmt.drop_state()
+
+ def _clean_state(self):
+ self._state = []
+ idmgmt.clean_state()
+
+ def reset(self):
+ if self.cib_elem is None:
+ return
+ self.cib_elem = None
+ self.cib_orig = None
+ self._init_vars()
+ self._clean_state()
+ idmgmt.clear()
+
+ def find_objects(self, obj_id):
+ "Find objects for id (can be a wildcard-glob)."
+ def matchfn(x):
+ return x and fnmatch.fnmatch(x, obj_id)
+ if not self.is_cib_sane() or obj_id is None:
+ return None
+ objs = []
+ for obj in self.cib_objects:
+ if matchfn(obj.obj_id):
+ objs.append(obj)
+ # special case for Heartbeat nodes which have id
+ # different from uname
+ elif obj.obj_type == "node" and matchfn(obj.node.get("uname")):
+ objs.append(obj)
+ return objs
+
+ def find_object(self, obj_id):
+ if not self.is_cib_sane():
+ return None
+ objs = self.find_objects(obj_id)
+ if objs is None:
+ return None
+ if objs:
+ for obj in objs:
+ if obj.obj_type != 'node':
+ return obj
+ return objs[0]
+ return None
+
+ def find_resource(self, obj_id):
+ if not self.is_cib_sane():
+ return None
+ objs = self.find_objects(obj_id)
+ if objs is None:
+ return None
+ for obj in objs:
+ if obj.obj_type != 'node':
+ return obj
+ return None
+
+ def find_node(self, obj_id):
+ if not self.is_cib_sane():
+ return None
+ objs = self.find_objects(obj_id)
+ if objs is None:
+ return None
+ for obj in objs:
+ if obj.obj_type == 'node':
+ return obj
+ return None
+
+ #
+ # tab completion functions
+ #
+ def id_list(self):
+ "List of ids (for completion)."
+ return [x.obj_id for x in self.cib_objects]
+
+ def type_list(self):
+ "List of object types (for completion)"
+ return list(set([x.obj_type for x in self.cib_objects]))
+
+ def tag_list(self):
+ "List of tags (for completion)"
+ return list(set([x.obj_id for x in self.cib_objects if x.obj_type == "tag"]))
+
+ def prim_id_list(self):
+ "List of primitives ids (for group completion)."
+ return [x.obj_id for x in self.cib_objects if x.obj_type == "primitive"]
+
+ def fence_id_list(self):
+ """
+ List the ids of all configured fence agents
+ """
+ return [x.obj_id for x in self.cib_objects if x.node.get('class') == "stonith"]
+
+ def fence_id_list_with_pcmk_delay(self):
+ """
+ List the ids of fence agents that have pcmk_delay_max configured
+ """
+ id_list = []
+ for x in self.cib_objects:
+ if x.node.get("class") != "stonith":
+ continue
+ for c in x.node.xpath('.//nvpair'):
+ if c.get("name") == "pcmk_delay_max" and utils.crm_msec(c.get("value")) > 0:
+ id_list.append(x.obj_id)
+ break
+ return id_list
+
+ def fence_id_list_without_pcmk_delay(self):
+ """
+ List the ids of fence agents that do not have pcmk_delay_max configured
+ """
+ return [_id for _id in self.fence_id_list() if _id not in self.fence_id_list_with_pcmk_delay()]
+
+ def children_id_list(self):
+ "List of child ids (for clone/master completion)."
+ return [x.obj_id for x in self.cib_objects if x.obj_type in constants.children_tags]
+
+ def rsc_id_list(self):
+ "List of all resource ids."
+ return [x.obj_id for x in self.cib_objects
+ if x.obj_type in constants.resource_tags]
+
+ def top_rsc_id_list(self):
+ "List of top resource ids (for constraint completion)."
+ return [x.obj_id for x in self.cib_objects
+ if x.obj_type in constants.resource_tags and not x.parent]
+
+ def node_id_list(self):
+ "List of node ids."
+ return sorted([x.node.get("uname") for x in self.cib_objects
+ if x.obj_type == "node"])
+
+ def f_prim_free_id_list(self):
+ "List of possible primitives ids (for group completion)."
+ return [x.obj_id for x in self.cib_objects
+ if x.obj_type == "primitive" and not x.parent]
+
+ def f_prim_list_in_group(self, gname):
+ "List resources in a group"
+ return [x.obj_id for x in self.cib_objects
+ if x.obj_type == "primitive" and x.parent and \
+ x.parent.obj_id == gname]
+
+ def f_group_id_list(self):
+ "List of group ids."
+ return [x.obj_id for x in self.cib_objects
+ if x.obj_type == "group"]
+
+ def rsc_template_list(self):
+ "List of templates."
+ return [x.obj_id for x in self.cib_objects
+ if x.obj_type == "rsc_template"]
+
+ def f_children_id_list(self):
+ "List of possible child ids (for clone/master completion)."
+ return [x.obj_id for x in self.cib_objects
+ if x.obj_type in constants.children_tags and not x.parent]
+
+ #
+ # a few helper functions
+ #
+ def find_container_child(self, node):
+ "Find an object which may be the child in a container."
+ for obj in reversed(self.cib_objects):
+ if node.tag == "fencing-topology" and obj.xml_obj_type == "fencing-topology":
+ return obj
+ if node.tag == obj.node.tag and node.get("id") == obj.obj_id:
+ return obj
+ return None
+
+ def find_xml_node(self, tag, ident, strict=True):
+ "Find a xml node of this type with this id."
+ try:
+ if tag in constants.defaults_tags:
+ expr = '//%s/meta_attributes[@id="%s"]' % (tag, ident)
+ elif tag == 'fencing-topology':
+ expr = '//fencing-topology'
+ else:
+ expr = '//%s[@id="%s"]' % (tag, ident)
+ return self.cib_elem.xpath(expr)[0]
+ except IndexError:
+ if strict:
+ logger.warning("strange, %s element %s not found", tag, ident)
+ return None
+
+ #
+ # Element editing stuff.
+ #
+ def default_timeouts(self, *args):
+ '''
+ Set timeouts for operations from the defaults provided in
+ the meta-data.
+ '''
+ implied_actions = ["start", "stop"]
+ implied_ms_actions = ["promote", "demote"]
+ implied_migrate_actions = ["migrate_to", "migrate_from"]
+ other_actions = ("monitor",)
+ if not self.is_cib_sane():
+ return False
+ rc = True
+ for obj_id in args:
+ obj = self.find_resource(obj_id)
+ if not obj:
+ logger_utils.no_object_err(obj_id)
+ rc = False
+ continue
+ if obj.obj_type != "primitive":
+ logger.warning("element %s is not a primitive", obj_id)
+ rc = False
+ continue
+ r_node = reduce_primitive(obj.node)
+ if r_node is None:
+ # cannot do anything without template defined
+ logger.warning("template for %s not defined", obj_id)
+ rc = False
+ continue
+ ra = get_ra(r_node)
+ if ra.mk_ra_node() is None: # no RA found?
+ if not self.is_asymm_cluster():
+ ra.error("no resource agent found for %s" % obj_id)
+ continue
+ obj_modified = False
+ for c in r_node.iterchildren():
+ if c.tag == "operations":
+ for c2 in c.iterchildren():
+ if not c2.tag == "op":
+ continue
+ op, pl = op2list(c2)
+ if not op:
+ continue
+ if op in implied_actions:
+ implied_actions.remove(op)
+ elif can_migrate(r_node) and op in implied_migrate_actions:
+ implied_migrate_actions.remove(op)
+ elif is_ms_or_promotable_clone(obj.node.getparent()) and op in implied_ms_actions:
+ implied_ms_actions.remove(op)
+ elif op not in other_actions:
+ continue
+ adv_timeout = None
+ role = c2.get('role')
+ depth = c2.get('depth')
+ adv_timeout = ra.get_op_attr_value(op, "timeout", role=role, depth=depth)
+ if adv_timeout:
+ c2.set("timeout", adv_timeout)
+ obj_modified = True
+ l = implied_actions
+ if can_migrate(r_node):
+ l += implied_migrate_actions
+ if is_ms_or_promotable_clone(obj.node.getparent()):
+ l += implied_ms_actions
+ for op in l:
+ adv_timeout = ra.get_op_attr_value(op, "timeout")
+ if not adv_timeout:
+ continue
+ n = etree.Element('op')
+ n.set('name', op)
+ n.set('timeout', adv_timeout)
+ n.set('interval', '0')
+ if not obj.add_operation(n):
+ rc = False
+ else:
+ obj_modified = True
+ if obj_modified:
+ obj.set_updated()
+ return rc
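+
+ # Illustrative usage (resource ids are made up):
+ # cib_factory.default_timeouts("vip", "fs0")
+ # fetches the advised timeouts from the RA meta-data and applies them
+ # to the start/stop/monitor operations of the listed primitives.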
+
+ def is_id_refd(self, attr_list_type, ident):
+ '''
+ Is this ID referenced anywhere?
+ Used from cliformat
+ '''
+ try:
+ return self.id_refs[ident] == attr_list_type
+ except KeyError:
+ return False
+
+ def resolve_id_ref(self, attr_list_type, id_ref):
+ '''
+ The user may specify id_ref either as an object id or as an
+ attribute set id. Here we try to figure out which one it is
+ and, if it is the former, find the right id to reference.
+ '''
+ self.id_refs[id_ref] = attr_list_type
+ obj = self.find_resource(id_ref)
+ if obj:
+ nodes = obj.node.xpath(".//%s" % attr_list_type)
+ numnodes = len(nodes)
+ if numnodes > 1:
+ logger.warning("%s contains more than one %s, using first", obj.obj_id, attr_list_type)
+ if numnodes > 0:
+ node_id = nodes[0].get("id")
+ if node_id:
+ return node_id
+ check_id_ref(self.cib_elem, id_ref)
+ return id_ref
+
+ def _get_attr_value(self, obj_type, attr):
+ if not self.is_cib_sane():
+ return None
+ for obj in self.cib_objects:
+ if obj.obj_type == obj_type and obj.node is not None:
+ for n in nvpairs2list(obj.node):
+ if n.get('name') == attr:
+ return n.get('value')
+ return None
+
+ def get_property(self, prop):
+ '''
+ Get the value of the given cluster property.
+ '''
+ return self._get_attr_value("property", prop)
+
+ def get_property_w_default(self, prop):
+ '''
+ Get the value of the given property. If it is
+ not set, return the default value.
+ '''
+ v = self.get_property(prop)
+ if v is None:
+ try:
+ v = get_properties_meta().param_default(prop)
+ except Exception:
+ pass
+ return v
+
+ def get_op_default(self, attr):
+ '''
+ Get the value of the attribute from op_defaults.
+ '''
+ return self._get_attr_value("op_defaults", attr)
+
+ def is_asymm_cluster(self):
+ symm = self.get_property("symmetric-cluster")
+ return symm and symm != "true"
+
+ def new_object(self, obj_type, obj_id):
+ "Create a new object of type obj_type."
+ logger.debug("new_object: %s:%s", obj_type, obj_id)
+ existing = self.find_object(obj_id)
+ if existing and [obj_type, existing.obj_type].count("node") != 1:
+ logger.error("Cannot create %s with ID '%s': Found existing %s with same ID.", obj_type, obj_id, existing.obj_type)
+ return None
+ xml_obj_type = backtrans.get(obj_type)
+ v = cib_object_map.get(xml_obj_type)
+ if v is None:
+ return None
+ obj = v[1](xml_obj_type)
+ obj.obj_type = obj_type
+ obj.set_id(obj_id)
+ obj.node = None
+ obj.origin = "user"
+ return obj
+
+ def modified_elems(self):
+ return [x for x in self.cib_objects
+ if x.updated or x.origin == "user"]
+
+ def get_elems_of_ra_partial_search(self, spec):
+ """
+ Get elements matching a given RA spec: class:provider:type,
+ class:type, or a single class/provider/type token.
+ Return [] if there are no results.
+ """
+ def match_type(obj, tp):
+ type_res = obj.node.get("type")
+ return type_res and tp.lower() in type_res.lower()
+
+ content_list = spec.split(':')[1:]
+ if len(content_list) > 3:
+ return []
+ if len(content_list) == 3:
+ cls, provider, tp = content_list
+ return [x for x in self.cib_objects
+ if match_type(x, tp)
+ and x.node.get("provider") == provider
+ and x.node.get("class") == cls]
+ if len(content_list) == 2:
+ cls, tp = content_list
+ return [x for x in self.cib_objects
+ if match_type(x, tp)
+ and x.node.get("class") == cls]
+ if len(content_list) == 1:
+ tp = content_list[0]
+ if not tp:
+ return []
+ return [x for x in self.cib_objects
+ if match_type(x, tp) or
+ x.node.get("class") == tp or
+ x.node.get("provider") == tp]
+
+ def get_elems_on_type(self, spec):
+ if not spec.startswith("type:"):
+ return []
+ t = spec[5:]
+ return [x for x in self.cib_objects if x.obj_type == t]
+
+ def get_elems_on_tag(self, spec):
+ if not spec.startswith("tag:"):
+ return []
+ t = spec[4:]
+ matching_tags = [x for x in self.cib_objects if x.obj_type == 'tag' and x.obj_id == t]
+ ret = []
+ for mt in matching_tags:
+ matches = [cib_factory.find_resource(o) for o in mt.node.xpath('./obj_ref/@id')]
+ ret += [m for m in matches if m is not None]
+ return ret
+
+ def filter_objects(self, filters):
+ """
+ Filter out a set of objects given a list of filters.
+
+ Complication: We want to refine selections, for example
+ type:primitive tag:foo should give all primitives tagged foo,
+ or type:node boo should give the node boo, but not the primitive boo.
+
+ Add keywords and|or to influence selection?
+ Default to "or" between matches (like now)
+
+ type:primitive or type:group = all primitives and groups
+ type:primitive and foo = primitives with id foo
+ type:primitive and foo* = primitives that start with id foo
+ type:primitive or foo* = all that start with id foo plus all primitives
+ type:primitive and tag:foo
+
+ Returns:
+ True, set of matching objects on success
+ False, the failing spec on failure
+ """
+ if not filters:
+ return True, copy.copy(self.cib_objects)
+ if filters[0] == 'NOOBJ':
+ return True, orderedset.oset([])
+ obj_set = orderedset.oset([])
+ and_filter, and_set = False, None
+ for spec in filters:
+ if spec == "or":
+ continue
+ elif spec == "and":
+ and_filter, and_set = True, obj_set
+ obj_set = orderedset.oset([])
+ continue
+ if spec == "changed":
+ obj_set |= orderedset.oset(self.modified_elems())
+ elif spec.startswith("type:"):
+ obj_set |= orderedset.oset(self.get_elems_on_type(spec))
+ elif spec.startswith("tag:"):
+ obj_set |= orderedset.oset(self.get_elems_on_tag(spec))
+ elif spec.startswith("related:"):
+ name = spec[len("related:"):]
+ obj_set |= orderedset.oset(self.find_objects(name) or [])
+ obj = self.find_object(name)
+ if obj is not None:
+ obj_set |= orderedset.oset(self.related_elements(obj))
+ obj_set |= orderedset.oset(self.get_elems_of_ra_partial_search(spec))
+ else:
+ objs = self.find_objects(spec) or []
+ for obj in objs:
+ obj_set.add(obj)
+ if not objs:
+ return False, spec
+ if and_filter is True:
+ and_filter, obj_set = False, obj_set.intersection(and_set)
+ if and_filter is True:
+ and_filter, obj_set = False, and_set
+ return True, obj_set
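+
+ # Example (illustrative), refining a selection with "and":
+ # rc, objs = cib_factory.filter_objects(("type:primitive", "and", "tag:web"))
+ # rc is True and objs holds only the primitives tagged "web".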
+
+ def mkobj_set(self, *args):
+ rc, obj_set = self.filter_objects(args)
+ if rc is False:
+ # on failure, filter_objects returns the failing spec as the second value
+ logger_utils.no_object_err(obj_set)
+ return False, orderedset.oset([])
+ return rc, obj_set
+
+ def get_all_obj_set(self):
+ return set(self.cib_objects)
+
+ def has_no_primitives(self):
+ return not self.get_elems_on_type("type:primitive")
+
+ def has_cib_changed(self):
+ if not self.is_cib_sane():
+ return False
+ return self.modified_elems() or self.remove_queue
+
+ def ensure_cib_updated(self):
+ if options.interactive and not self.has_cib_changed():
+ self.refresh()
+
+ def _verify_constraints(self, node):
+ '''
+ Check if all resources referenced in a constraint exist
+ '''
+ rc = True
+ constraint_id = node.get("id")
+ for obj_id in referenced_resources(node):
+ if not self.find_resource(obj_id):
+ logger_utils.constraint_norefobj_err(constraint_id, obj_id)
+ rc = False
+ return rc
+
+ def _verify_rsc_children(self, obj):
+ '''
+ Check prerequisites:
+ a) all children must exist
+ b) no child may have more than one parent
+ c) there may not be duplicate children
+ '''
+ obj_id = obj.obj_id
+ rc = True
+ c_dict = {}
+ for c in obj.node.iterchildren():
+ if not is_cib_element(c):
+ continue
+ child_id = c.get("id")
+ if not self._verify_child(child_id, obj.node.tag, obj_id):
+ rc = False
+ if child_id in c_dict:
+ logger.error("in group %s child %s listed more than once", obj_id, child_id)
+ rc = False
+ c_dict[child_id] = 1
+ for other in [x for x in self.cib_objects
+ if x != obj and is_container(x.node)]:
+ shared_obj = set(obj.children) & set(other.children)
+ if shared_obj:
+ logger.error("%s contained in both %s and %s", ','.join([x.obj_id for x in shared_obj]), obj_id, other.obj_id)
+ rc = False
+ return rc
+
+ def _verify_child(self, child_id, parent_tag, obj_id):
+ 'Check if child exists and obj_id is (or may become) its parent.'
+ child = self.find_resource(child_id)
+ if not child:
+ logger_utils.no_object_err(child_id)
+ return False
+ if parent_tag == "group" and child.obj_type != "primitive":
+ logger.error("a group may contain only primitives; %s is %s", child_id, child.obj_type)
+ return False
+ if child.parent and child.parent.obj_id != obj_id:
+ logger.error("%s already in use at %s", child_id, child.parent.obj_id)
+ return False
+ if child.node.tag not in constants.children_tags:
+ logger.error("%s may contain a primitive or a group; %s is %s", parent_tag, child_id, child.obj_type)
+ return False
+ return True
+
+ def _verify_element(self, obj):
+ '''
+ Can we create this object given its CLI representation.
+ This is not about syntax, we're past that, but about
+ semantics.
+ Right now we check if the children, if any, are fit for
+ the parent. And if this is a constraint, if all
+ referenced resources are present.
+ '''
+ rc = True
+ node = obj.node
+ obj_id = obj.obj_id
+ try:
+ cib_object_map[node.tag][0]
+ except KeyError:
+ logger.error("element %s (%s) not recognized", node.tag, obj_id)
+ return False
+ if is_container(node):
+ rc &= self._verify_rsc_children(obj)
+ elif is_constraint(node):
+ rc &= self._verify_constraints(node)
+ return rc
+
+ def create_object(self, *args):
+ if not self.is_cib_sane():
+ return False
+ return self.create_from_cli(list(args)) is not None
+
+ def set_property_cli(self, obj_type, node):
+ pset_id = node.get('id') or default_id_for_obj(obj_type)
+ obj = self.find_object(pset_id)
+ # If id is the default, use any existing set rather create another one.
+ if not obj and pset_id == default_id_for_obj(obj_type):
+ objs = self.get_elems_on_type("type:%s" %obj_type)
+ if objs and len(objs) > 0:
+ obj = objs[-1]
+ if not obj:
+ if not is_id_valid(pset_id):
+ logger_utils.invalid_id_err(pset_id)
+ return None
+ obj = self.new_object(obj_type, pset_id)
+ if not obj:
+ return None
+ topnode = get_topnode(self.cib_elem, obj.parent_type)
+ obj.node = etree.SubElement(topnode, node.tag)
+ obj.origin = "user"
+ obj.node.set('id', pset_id)
+ topnode.append(obj.node)
+ self.cib_objects.append(obj)
+ copy_nvpairs(obj.node, node)
+ obj.normalize_parameters()
+ obj.set_updated()
+ return obj
+
+ def add_op(self, node):
+ '''Add an op to a primitive.'''
+ # does the referenced primitive exist
+ rsc_id = node.get('rsc')
+ rsc_obj = self.find_resource(rsc_id)
+ if not rsc_obj:
+ logger_utils.no_object_err(rsc_id)
+ return None
+ if rsc_obj.obj_type != "primitive":
+ logger.error("%s is not a primitive", rsc_id)
+ return None
+
+ # the given node is not postprocessed
+ node, obj_type, obj_id = postprocess_cli(node, id_hint=rsc_obj.obj_id)
+
+ del node.attrib['rsc']
+ return rsc_obj.add_operation(node)
+
+ def create_from_cli(self, cli):
+ 'Create a new cib object from the cli representation.'
+ if not self.is_cib_sane():
+ logger.debug("create_from_cli (%s): is_cib_sane() failed", cli)
+ return None
+ if isinstance(cli, (list, str)):
+ elem, obj_type, obj_id = parse_cli_to_xml(cli)
+ else:
+ elem, obj_type, obj_id = postprocess_cli(cli)
+ if elem is None:
+ # FIXME: raise error?
+ logger.debug("create_from_cli (%s): failed", cli)
+ return None
+ logger.debug("create_from_cli: %s, %s, %s", xml_tostring(elem), obj_type, obj_id)
+ if obj_type in olist(constants.nvset_cli_names):
+ return self.set_property_cli(obj_type, elem)
+ if obj_type == "op":
+ return self.add_op(elem)
+ if obj_type == "node":
+ obj = self.find_node(obj_id)
+ # make an exception and allow updating nodes
+ if obj:
+ self.merge_from_cli(obj, elem)
+ return obj
+ obj = self.new_object(obj_type, obj_id)
+ if not obj:
+ return None
+ return self._add_element(obj, elem)
+
+ def update_from_cli(self, obj, node, method):
+ '''
+ Replace element from the cli intermediate.
+ If this is an update and the element is properties, then
+ the new properties should be merged with the old.
+ Otherwise, users may be surprised.
+ '''
+ if method == 'update' and obj.obj_type in constants.nvset_cli_names:
+ return self.merge_from_cli(obj, node)
+ return self.update_element(obj, node)
+
+ def update_from_node(self, obj, node):
+ 'Update element from a doc node.'
+ idmgmt.replace_xml(obj.node, node)
+ return self.update_element(obj, node)
+
+ def update_element(self, obj, newnode):
+ 'Update element from a doc node.'
+ if newnode is None:
+ return False
+ if not self.is_cib_sane():
+ idmgmt.replace_xml(newnode, obj.node)
+ return False
+ oldnode = obj.node
+ if xml_equals(oldnode, newnode):
+ if newnode.getparent() is not None:
+ newnode.getparent().remove(newnode)
+ return True # the new and the old versions are equal
+ obj.node = newnode
+ logger.debug("update CIB element: %s", str(obj))
+ if oldnode.getparent() is not None:
+ oldnode.getparent().replace(oldnode, newnode)
+ obj.nocli = False # try again after update
+ if not self._adjust_children(obj):
+ return False
+ if not obj.cli_use_validate():
+ logger.debug("update_element: validation failed (%s, %s)", obj, xml_tostring(newnode))
+ obj.nocli_warn = True
+ obj.nocli = True
+ obj.set_updated()
+ return True
+
+ def merge_from_cli(self, obj, node):
+ logger.debug("merge_from_cli: %s %s", obj.obj_type, xml_tostring(node))
+ if obj.obj_type in constants.nvset_cli_names:
+ rc = merge_attributes(obj.node, node, "nvpair")
+ else:
+ rc = merge_nodes(obj.node, node)
+ if rc:
+ obj.set_updated()
+ return True
+
+ def _cli_set_update(self, edit_d, mk_set, upd_set, del_set, method):
+ '''
+ Create/update/remove elements.
+ edit_d is a dict with id keys and parsed xml values.
+ mk_set is a set of ids to be created.
+ upd_set is a set of ids to be updated (replaced).
+ del_set is a set to be removed.
+ method is either replace or update.
+ '''
+ logger.debug("_cli_set_update: mk=%s, upd=%s, del=%s", mk_set, upd_set, del_set)
+ test_l = []
+
+ def obj_is_container(x):
+ obj = self.find_resource(x)
+ return obj and is_container(obj.node)
+
+ def obj_is_constraint(x):
+ obj = self.find_resource(x)
+ return obj and is_constraint(obj.node)
+
+ del_constraints = []
+ del_containers = []
+ del_objs = []
+ for x in del_set:
+ if obj_is_constraint(x):
+ del_constraints.append(x)
+ elif obj_is_container(x):
+ del_containers.append(x)
+ else:
+ del_objs.append(x)
+
+ # delete constraints and containers first in case objects are moved elsewhere
+ if not self.delete(*del_constraints):
+ logger.debug("delete %s failed", list(del_set))
+ return False
+ if not self.delete(*del_containers):
+ logger.debug("delete %s failed", list(del_set))
+ return False
+
+ for cli in processing_sort([edit_d[x] for x in mk_set]):
+ obj = self.create_from_cli(cli)
+ if not obj:
+ logger.debug("create_from_cli '%s' failed", xml_tostring(cli, pretty_print=True))
+ return False
+ test_l.append(obj)
+
+ for ident in upd_set:
+ if edit_d[ident].tag == 'node':
+ obj = self.find_node(ident)
+ else:
+ obj = self.find_resource(ident)
+ if not obj:
+ logger.debug("%s not found!", ident)
+ return False
+ node, _, _ = postprocess_cli(edit_d[ident], oldnode=obj.node)
+ if node is None:
+ logger.debug("postprocess_cli failed: %s", ident)
+ return False
+ if not self.update_from_cli(obj, node, method):
+ logger.debug("update_from_cli failed: %s, %s, %s", obj, xml_tostring(node), method)
+ return False
+ test_l.append(obj)
+
+ if not self.delete(*reversed(del_objs)):
+ logger.debug("delete %s failed", list(del_set))
+ return False
+ rc = True
+ for obj in test_l:
+ if not self.test_element(obj):
+ logger.debug("test_element failed for %s", obj)
+ rc = False
+ return rc & self.check_structure()
+
+ def _xml_set_update(self, edit_d, mk_set, upd_set, del_set):
+ '''
+ Create/update/remove elements.
+ node_l is a list of elementtree elements.
+ mk_set is a set of ids to be created.
+ upd_set is a set of ids to be updated (replaced).
+ del_set is a set to be removed.
+ '''
+ logger.debug("_xml_set_update: %s, %s, %s", mk_set, upd_set, del_set)
+ test_l = []
+ for el in processing_sort([edit_d[x] for x in mk_set]):
+ obj = self.create_from_node(el)
+ if not obj:
+ return False
+ test_l.append(obj)
+ for ident in upd_set:
+ if edit_d[ident].tag == 'node':
+ obj = self.find_node(ident)
+ else:
+ obj = self.find_resource(ident)
+ if not obj:
+ return False
+ if not self.update_from_node(obj, edit_d[ident]):
+ return False
+ test_l.append(obj)
+ if not self.delete(*list(del_set)):
+ return False
+ rc = True
+ for obj in test_l:
+ if not self.test_element(obj):
+ rc = False
+ return rc & self.check_structure()
+
+ def _set_update(self, edit_d, mk_set, upd_set, del_set, upd_type, method):
+ if upd_type == "xml":
+ return self._xml_set_update(edit_d, mk_set, upd_set, del_set)
+ return self._cli_set_update(edit_d, mk_set, upd_set, del_set, method)
+
+ def set_update(self, edit_d, mk_set, upd_set, del_set, upd_type="cli", method='replace'):
+ '''
+ Just a wrapper for _set_update() to allow for a
+ rollback.
+ '''
+ self._push_state()
+ if not self._set_update(edit_d, mk_set, upd_set, del_set, upd_type, method):
+ if not self._pop_state():
+ raise RuntimeError("this should never happen!")
+ return False
+ self._drop_state()
+ return True
+
+ def _adjust_children(self, obj):
+ '''
+ All stuff children related: manage the nodes of children,
+ update the list of children for the parent, update
+ parents in the children.
+ '''
+ new_children_ids = get_rsc_children_ids(obj.node)
+ if not new_children_ids:
+ return True
+ old_children = [x for x in obj.children if x.parent == obj]
+ new_children = [self.find_resource(x) for x in new_children_ids]
+ new_children = [c for c in new_children if c is not None]
+ obj.children = new_children
+ # relink orphans to top
+ for child in set(old_children) - set(obj.children):
+ logger.debug("relink child %s to top", str(child))
+ self._relink_child_to_top(child)
+ if not self._are_children_orphans(obj):
+ return False
+ return self._update_children(obj)
+
+ def _relink_child_to_top(self, obj):
+ 'Relink a child to the top node.'
+ get_topnode(self.cib_elem, obj.parent_type).append(obj.node)
+ obj.parent = None
+
+ def _are_children_orphans(self, obj):
+ """
+ Check if we're adding a container containing objects
+ we've already added to a different container
+ """
+ for child in obj.children:
+ if not child.parent:
+ continue
+ if child.parent == obj or child.parent.obj_id == obj.obj_id:
+ continue
+ if child.parent.obj_type in constants.container_tags:
+ logger.error("Cannot create %s: Child %s already in %s", obj, child, child.parent)
+ return False
+ return True
+
+ def _update_children(self, obj):
+ '''For composite objects: update all children nodes.
+ '''
+ # unlink all and find them in the new node
+ for child in obj.children:
+ oldnode = child.node
+ newnode = obj.find_child_in_node(child)
+ if newnode is None:
+ logger.error("Child found in children list but not in node: %s, %s", obj, child)
+ return False
+ child.node = newnode
+ if child.children: # and children of children
+ if not self._update_children(child):
+ return False
+ rmnode(oldnode)
+ if child.parent:
+ child.parent.updated = True
+ child.parent = obj
+ return True
+
+ def test_element(self, obj):
+ if obj.xml_obj_type not in constants.defaults_tags:
+ if not self._verify_element(obj):
+ return False
+ if utils.is_check_always() and obj.check_sanity() > 1:
+ return False
+ return True
+
+ def _update_links(self, obj):
+ '''
+ Update the structure links for the object (obj.children,
+ obj.parent). Update also the XML, if necessary.
+ '''
+ obj.children = []
+ if obj.obj_type not in constants.container_tags:
+ return
+ for c in obj.node.iterchildren():
+ if is_child_rsc(c):
+ child = self.find_container_child(c)
+ if not child:
+ logger_utils.missing_obj_err(c)
+ continue
+ child.parent = obj
+ obj.children.append(child)
+ if c != child.node:
+ rmnode(child.node)
+ child.node = c
+
+ def _add_element(self, obj, node):
+ assert node is not None
+ obj.node = node
+ obj.set_id()
+ pnode = get_topnode(self.cib_elem, obj.parent_type)
+ logger.debug("_add_element: append child %s to %s", obj.obj_id, pnode.tag)
+ if not self._adjust_children(obj):
+ return None
+ pnode.append(node)
+ self._redirect_children_constraints(obj)
+ obj.normalize_parameters()
+ if not obj.cli_use_validate():
+ self.nocli_warn = True
+ obj.nocli = True
+ self._update_links(obj)
+ obj.origin = "user"
+ self.cib_objects.append(obj)
+ return obj
+
+ def _add_children(self, obj_type, node):
+ """
+ Called from create_from_node.
+ In case this is a clone/group/master created from XML,
+ the child node(s) may not have been added as separate objects yet.
+ """
+ if obj_type not in constants.container_tags:
+ return True
+
+ # bsc#959895: also process cloned groups
+ for c in node.iterchildren():
+ if c.tag not in ('primitive', 'group'):
+ continue
+ pid = c.get('id')
+ child_obj = self.find_resource(pid)
+ if child_obj is None:
+ child_obj = self.create_from_node(copy.deepcopy(c))
+ if not child_obj:
+ return False
+ return True
+
+ def create_from_node(self, node):
+ 'Create a new cib object from a document node.'
+ if node is None:
+ logger.debug("create_from_node: got None")
+ return None
+ try:
+ obj_type = cib_object_map[node.tag][0]
+ except KeyError:
+ logger.debug("create_from_node: keyerror (%s)", node.tag)
+ return None
+ if is_defaults(node):
+ node = get_rscop_defaults_meta_node(node)
+ if node is None:
+ logger.debug("create_from_node: get_rscop_defaults_meta_node failed")
+ return None
+
+ if not self._add_children(obj_type, node):
+ return None
+
+ obj = self.new_object(obj_type, node.get("id"))
+ if not obj:
+ return None
+ return self._add_element(obj, node)
+
+ def _remove_obj(self, obj):
+ "Remove a cib object."
+ logger.debug("remove object %s", str(obj))
+ for child in obj.children:
+ # just relink, don't remove children
+ self._relink_child_to_top(child)
+ if obj.parent: # remove obj from its parent, if any
+ obj.parent.children.remove(obj)
+ idmgmt.remove_xml(obj.node)
+ rmnode(obj.node)
+ self._add_to_remove_queue(obj)
+ self.cib_objects.remove(obj)
+ for tag in self.related_tags(obj):
+ # remove self from tag
+ # remove tag if self is last tagged object in tag
+ selfies = [x for x in tag.node.iterchildren() if x.get('id') == obj.obj_id]
+ for c in selfies:
+ rmnode(c)
+ if not tag.node.xpath('./obj_ref'):
+ self._remove_obj(tag)
+ if not self._no_constraint_rm_msg:
+ logger.info("hanging %s deleted", str(tag))
+ for c_obj in self.related_constraints(obj):
+ if is_simpleconstraint(c_obj.node) and obj.children:
+ # the first child inherits constraints
+ rename_rscref(c_obj, obj.obj_id, obj.children[0].obj_id)
+ deleted = False
+ if delete_rscref(c_obj, obj.obj_id):
+ deleted = True
+ if silly_constraint(c_obj.node, obj.obj_id):
+ # remove invalid constraints
+ self._remove_obj(c_obj)
+ if not self._no_constraint_rm_msg:
+ logger.info("hanging %s deleted", str(c_obj))
+ elif deleted:
+ logger.info("constraint %s updated", str(c_obj))
+
+ def related_tags(self, obj):
+ def related_tag(tobj):
+ if tobj.obj_type != 'tag':
+ return False
+ for c in tobj.node.iterchildren():
+ if c.get('id') == obj.obj_id:
+ return True
+ return False
+ return [x for x in self.cib_objects if related_tag(x)]
+
+ def related_constraints(self, obj):
+ def related_constraint(obj2):
+ return is_constraint(obj2.node) and rsc_constraint(obj.obj_id, obj2.node)
+ if not is_resource(obj.node):
+ return []
+ return [x for x in self.cib_objects if related_constraint(x)]
+
+ def related_elements(self, obj):
+ "Both constraints, groups, tags, ..."
+ if not is_resource(obj.node):
+ return []
+ return [x for x in self.cib_objects if is_related(obj.obj_id, x.node)]
+
+ def _redirect_children_constraints(self, obj):
+ '''
+ Redirect constraints to the new parent
+ '''
+ for child in obj.children:
+ for c_obj in self.related_constraints(child):
+ rename_rscref(c_obj, child.obj_id, obj.obj_id)
+ # drop useless constraints which may have been created above
+ for c_obj in self.related_constraints(obj):
+ if silly_constraint(c_obj.node, obj.obj_id):
+ self._no_constraint_rm_msg = True
+ self._remove_obj(c_obj)
+ self._no_constraint_rm_msg = False
+
+ def template_primitives(self, obj):
+ if not is_template(obj.node):
+ return []
+ c_list = []
+ for obj2 in self.cib_objects:
+ if not is_primitive(obj2.node):
+ continue
+ if obj2.node.get("template") == obj.obj_id:
+ c_list.append(obj2)
+ return c_list
+
+ def _check_running_primitives(self, prim_l):
+ rscstat = RscState()
+ for prim in prim_l:
+ if not rscstat.can_delete(prim.obj_id):
+ logger.error("resource %s is running, can't delete it", prim.obj_id)
+ return False
+ return True
+
+ def _add_to_remove_queue(self, obj):
+ if obj.origin == "cib":
+ self.remove_queue.append(obj)
+
+ def _delete_1(self, obj):
+ '''
+ Remove an object and its parent in case the object is the
+ only child.
+ '''
+ if obj.parent and len(obj.parent.children) == 1:
+ self._delete_1(obj.parent)
+ if obj in self.cib_objects: # don't remove parents twice
+ self._remove_obj(obj)
+
+ def delete(self, *args):
+ 'Delete a cib object.'
+ if not self.is_cib_sane():
+ return False
+ rc = True
+ l = []
+ rscstat = RscState()
+ for obj_id in args:
+ obj = self.find_object(obj_id)
+ if not obj:
+ # If --force is set:
+ # Unless something more serious goes wrong here,
+ # don't return an error code if the object
+ # to remove doesn't exist. This should help scripted
+ # workflows without compromising an interactive
+ # use.
+ if not config.core.force:
+ logger_utils.no_object_err(obj_id)
+ rc = False
+ continue
+ if not rscstat.can_delete(obj_id):
+ logger.error("resource %s is running, can't delete it", obj_id)
+ rc = False
+ continue
+ if is_template(obj.node):
+ prim_l = self.template_primitives(obj)
+ prim_l = [x for x in prim_l
+ if x not in l and x.obj_id not in args]
+ if not self._check_running_primitives(prim_l):
+ rc = False
+ continue
+ for prim in prim_l:
+ logger.info("hanging %s deleted", str(prim))
+ l.append(prim)
+ l.append(obj)
+ if l:
+ l = processing_sort_cli(l)
+ for obj in reversed(l):
+ self._delete_1(obj)
+ return rc
+
+ def rename(self, old_id, new_id):
+ '''
+ Rename a cib object.
+ - check if the resource (if it's a resource) is stopped
+ - check if the new id is not taken
+ - find the object with old id
+ - rename old id to new id in all related objects
+ (constraints)
+ - if the object came from the CIB, then it must be
+ deleted and the one with the new name created
+ - rename old id to new id in the object
+ '''
+ if not self.is_cib_sane() or not new_id:
+ return False
+ if idmgmt.id_in_use(new_id):
+ return False
+ obj = self.find_object(old_id)
+ if not obj:
+ logger_utils.no_object_err(old_id)
+ return False
+ if not obj.can_be_renamed():
+ return False
+ for c_obj in self.related_constraints(obj):
+ rename_rscref(c_obj, old_id, new_id)
+ rename_id(obj.node, old_id, new_id)
+ obj.obj_id = new_id
+ idmgmt.rename(old_id, new_id)
+ # FIXME: (bnc#901543)
+ # for each child node; if id starts with "%(old_id)s-" and
+ # is not referenced by anything, change that id as well?
+ # otherwise inner ids will resemble old name, not new
+ obj.set_updated()
+ return True
+
+ def erase(self):
+ "Remove all cib objects."
+ # remove only bottom objects and no constraints
+ # the rest will automatically follow
+ if not self.is_cib_sane():
+ return False
+ erase_ok = True
+ l = []
+ rscstat = RscState()
+ for obj in [obj for obj in self.cib_objects if not obj.children and not is_constraint(obj.node) and obj.obj_type != "node"]:
+ if not rscstat.can_delete(obj.obj_id):
+ logger.warning("resource %s is running, can't delete it", obj.obj_id)
+ erase_ok = False
+ else:
+ l.append(obj)
+ if not erase_ok:
+ logger.error("CIB erase aborted (nothing was deleted)")
+ return False
+ self._no_constraint_rm_msg = True
+ for obj in l:
+ self.delete(obj.obj_id)
+ self._no_constraint_rm_msg = False
+ remaining = 0
+ for obj in self.cib_objects:
+ if obj.obj_type != "node":
+ remaining += 1
+ if remaining > 0:
+ logger.error("strange, but these objects remained:")
+ for obj in self.cib_objects:
+ if obj.obj_type != "node":
+ print(str(obj), file=sys.stderr)
+ self.cib_objects = []
+ return True
+
+ def erase_nodes(self):
+ "Remove nodes only."
+ if not self.is_cib_sane():
+ return False
+ l = [obj for obj in self.cib_objects if obj.obj_type == "node"]
+ for obj in l:
+ self.delete(obj.obj_id)
+ return True
+
+ def refresh(self):
+ "Refresh from the CIB."
+ self.reset()
+ self.initialize()
+ return self.is_cib_sane()
+
+
+cib_factory = CibFactory()
+
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/cibstatus.py b/crmsh/cibstatus.py
new file mode 100644
index 0000000..9566022
--- /dev/null
+++ b/crmsh/cibstatus.py
@@ -0,0 +1,391 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+import os
+from tempfile import mkstemp
+from lxml import etree
+from . import tmpfiles
+from . import xmlutil
+from . import utils
+from . import config
+from .utils import ext_cmd, show_dot_graph, page_string
+from . import log
+
+
+logger = log.setup_logger(__name__)
+
+
+def get_tag_by_id(node, tag, ident):
+ "Find a doc node which matches tag and id."
+ for n in node.xpath(".//%s" % tag):
+ if n.get("id") == ident:
+ return n
+ return None
+
+
+def get_status_node_id(n):
+ # getparent() does not raise; it returns None at the root, so guard for that
+ n = n.getparent()
+ if n is None:
+ return None
+ if n.tag != "node_state":
+ return get_status_node_id(n)
+ return n.get("id")
+
+
+def get_status_node(status_node, node):
+ for n in status_node.iterchildren("node_state"):
+ if n.get("id") == node:
+ return n
+ return None
+
+
+def get_status_ops(status_node, rsc, op, interval, node=''):
+ '''
+ Find a doc node which matches the operation. interval set to
+ "-1" means to lookup an operation with non-zero interval (for
+ monitors). Empty interval means any interval is fine.
+ '''
+ l = []
+ for n in status_node.iterchildren("node_state"):
+ if node and n.get("id") != node:
+ continue
+ for r in n.iterchildren("lrm_resource"):
+ if r.get("id") != rsc:
+ continue
+ for o in r.iterchildren("lrm_rsc_op"):
+ if o.get("operation") != op:
+ continue
+ iv = o.get("interval")
+ if iv == interval or (interval == "-1" and iv != "0"):
+ l.append(o)
+ return l
+
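+# Illustrative call (assuming `status` is a parsed status section):
+# get_status_ops(status, "d0", "monitor", "-1")
+# returns all recurring monitor operations recorded for resource d0.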
+
+def split_op(op):
+ if op == "probe":
+ return "monitor", "0"
+ elif op == "monitor":
+ return "monitor", "-1"
+ elif op[0:8] == "monitor:":
+ return "monitor", op[8:]
+ return op, "0"
+
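+# Illustrative mappings:
+# split_op("probe") -> ("monitor", "0")
+# split_op("monitor") -> ("monitor", "-1")
+# split_op("monitor:10s") -> ("monitor", "10s")
+# split_op("start") -> ("start", "0")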
+
+def cib_path(source):
+ return source[0:7] == "shadow:" and xmlutil.shadowfile(source[7:]) or source
+
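+# Illustrative: "shadow:test" resolves through xmlutil.shadowfile("test");
+# any other value, e.g. "/tmp/cib.xml", is returned unchanged.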
+
+class CibStatus(object):
+ '''
+ CIB status management
+ '''
+ cmd_inject = "</dev/null >/dev/null 2>&1 crm_simulate -x %s -I %s"
+ cmd_run = "2>&1 crm_simulate -R -x %s"
+ cmd_simulate = "2>&1 crm_simulate -S -x %s"
+ node_ops = {
+ "online": "-u",
+ "offline": "-d",
+ "unclean": "-f",
+ }
+ ticket_ops = {
+ "grant": "-g",
+ "revoke": "-r",
+ "standby": "-b",
+ "activate": "-e",
+ }
+
+ def __init__(self):
+ self.origin = ""
+ self.backing_file = "" # file to keep the live cib
+ self.status_node = None
+ self.cib = None
+ self.reset_state()
+
+ def _cib_path(self, source):
+ if source[0:7] == "shadow:":
+ return xmlutil.shadowfile(source[7:])
+ else:
+ return source
+
+ def _load_cib(self, source):
+ if source == "live":
+ if not self.backing_file:
+ self.backing_file = xmlutil.cibdump2tmp()
+ if not self.backing_file:
+ return None
+ tmpfiles.add(self.backing_file)
+ else:
+ xmlutil.cibdump2file(self.backing_file)
+ f = self.backing_file
+ else:
+ f = cib_path(source)
+ return xmlutil.read_cib(xmlutil.file2cib_elem, f)
+
+ def _load(self, source):
+ cib = self._load_cib(source)
+ if cib is None:
+ return False
+ status = cib.find("status")
+ if status is None:
+ return False
+ self.cib = cib
+ self.status_node = status
+ self.reset_state()
+ return True
+
+ def reset_state(self):
+ self.modified = False
+ self.quorum = ''
+ self.node_changes = {}
+ self.op_changes = {}
+ self.ticket_changes = {}
+
+ def initialize(self):
+ src = utils.get_cib_in_use()
+ if not src:
+ src = "live"
+ else:
+ src = "shadow:" + src
+ if self._load(src):
+ self.origin = src
+
+ def source_file(self):
+ if self.origin == "live":
+ return self.backing_file
+ else:
+ return cib_path(self.origin)
+
+ def status_node_list(self):
+ st = self.get_status()
+ if st is None:
+ return
+ return [x.get("id") for x in st.xpath(".//node_state")]
+
+ def status_rsc_list(self):
+ st = self.get_status()
+ if st is None:
+ return
+ rsc_list = [x.get("id") for x in st.xpath(".//lrm_resource")]
+ # uniquify while preserving insertion order
+ return list(dict.fromkeys(rsc_list))
+
+ def load(self, source):
+ '''
+ Load the status section from the given source. The source
+ may be cluster ("live"), shadow CIB, or CIB in a file.
+ '''
+ if self.backing_file:
+ os.unlink(self.backing_file)
+ self.backing_file = ""
+ if not self._load(source):
+ logger.error("the cib contains no status")
+ return False
+ self.origin = source
+ return True
+
+ def save(self, dest=None):
+ '''
+ Save the modified status section to a file/shadow. If the
+ file exists, then it must be a cib file and the status
+ section is replaced with our status section. If the file
+ doesn't exist, then our section and some (?) configuration
+ is saved.
+ '''
+ if not self.modified:
+ logger.info("apparently you didn't modify status")
+ return False
+ if (not dest and self.origin == "live") or dest == "live":
+ logger.warning("cannot save status to the cluster")
+ return False
+ cib = self.cib
+ if dest:
+ dest_path = cib_path(dest)
+ if os.path.isfile(dest_path):
+ cib = self._load_cib(dest)
+ if cib is None:
+ logger.error("%s exists, but no cib inside", dest)
+ return False
+ else:
+ dest_path = cib_path(self.origin)
+ if cib != self.cib:
+ status = cib.find("status")
+ xmlutil.rmnode(status)
+ cib.append(self.status_node)
+ # use xmlutil.xml_tostring: etree.tostring returns bytes, which cannot
+ # be written to a file opened in text mode
+ xml = xmlutil.xml_tostring(cib)
+ try:
+ f = open(dest_path, "w")
+ except IOError as msg:
+ logger.error(msg)
+ return False
+ f.write(xml)
+ f.close()
+ return True
+
+ def _crm_simulate(self, cmd, nograph, scores, utilization, verbosity):
+ if not self.origin:
+ self.initialize()
+ if verbosity:
+ cmd = "%s -%s" % (cmd, verbosity.upper())
+ if scores:
+ cmd = "%s -s" % cmd
+ if utilization:
+ cmd = "%s -U" % cmd
+ if config.core.dotty and not nograph:
+ fd, dotfile = mkstemp()
+ cmd = "%s -D %s" % (cmd, dotfile)
+ else:
+ dotfile = None
+ rc = ext_cmd(cmd % self.source_file())
+ if dotfile:
+ show_dot_graph(dotfile)
+ return rc == 0
+
+ # actions is ignored
+ def run(self, nograph, scores, utilization, actions, verbosity):
+ return self._crm_simulate(self.cmd_run,
+ nograph, scores, utilization, verbosity)
+
+ # actions is ignored
+ def simulate(self, nograph, scores, utilization, actions, verbosity):
+ return self._crm_simulate(self.cmd_simulate,
+ nograph, scores, utilization, verbosity)
+
+ def get_status(self):
+ '''
+ Return the status section node.
+ '''
+ if not self.origin:
+ self.initialize()
+ if (self.status_node is None or (self.origin == "live" and not self.modified)) and not self._load(self.origin):
+ return None
+ return self.status_node
+
+ def list_changes(self):
+ '''
+ Dump a set of changes done.
+ '''
+ if not self.modified:
+ return True
+ for node in self.node_changes:
+ print(node, self.node_changes[node])
+ for op in self.op_changes:
+ print(op, self.op_changes[op])
+ for ticket in self.ticket_changes:
+ print(ticket, self.ticket_changes[ticket])
+ if self.quorum:
+ print("quorum:", self.quorum)
+ return True
+
+ def show(self):
+ '''
+ Page the "pretty" XML of the status section.
+ '''
+ if self.get_status() is None:
+ return False
+ page_string(xmlutil.xml_tostring(self.status_node, pretty_print=True))
+ return True
+
+ def inject(self, opts):
+ return ext_cmd("%s %s" %
+ (self.cmd_inject % (self.source_file(), self.source_file()), opts))
+
+ def set_quorum(self, v):
+ if not self.origin:
+ self.initialize()
+ rc = self.inject("--quorum=%s" % (v and "true" or "false"))
+ if rc != 0:
+ return False
+ self._load(self.origin)
+ self.quorum = v and "true" or "false"
+ self.modified = True
+ return True
+
+ def edit_node(self, node, state):
+ '''
+ Modify crmd, expected, and join attributes of node_state
+ to set the node's state to online, offline, or unclean.
+ '''
+ if self.get_status() is None:
+ return False
+ if state not in self.node_ops:
+ logger.error("unknown state %s", state)
+ return False
+ node_node = get_tag_by_id(self.status_node, "node_state", node)
+ if node_node is None:
+ logger.info("node %s created", node)
+ return False
+ rc = self.inject("%s %s" % (self.node_ops[state], node))
+ if rc != 0:
+ return False
+ self._load(self.origin)
+ self.node_changes[node] = state
+ self.modified = True
+ return True
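+
+ # Illustrative usage (node name is made up, assumes a loaded status section):
+ # cib_status.edit_node("node1", "offline")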
+
+ def edit_ticket(self, ticket, subcmd):
+ '''
+ Modify ticket status.
+ '''
+ if self.get_status() is None:
+ return False
+ if subcmd not in self.ticket_ops:
+ logger.error("unknown ticket command %s", subcmd)
+ return False
+ rc = self.inject("%s %s" % (self.ticket_ops[subcmd], ticket))
+ if rc != 0:
+ return False
+ self._load(self.origin)
+ self.ticket_changes[ticket] = subcmd
+ self.modified = True
+ return True
+
+ def edit_op(self, op, rsc, rc_code, op_status, node=''):
+ '''
+ Set rc-code and op-status in the lrm_rsc_op status
+ section element.
+ '''
+ if self.get_status() is None:
+ return False
+ l_op, l_int = split_op(op)
+ op_nodes = get_status_ops(self.status_node, rsc, l_op, l_int, node)
+ if l_int == "-1" and len(op_nodes) != 1:
+ logger.error("need interval for the monitor op")
+ return False
+ if node == '' and len(op_nodes) != 1:
+ if op_nodes:
+ nodelist = [get_status_node_id(x) for x in op_nodes]
+ logger.error("operation %s found at %s", op, ' '.join(nodelist))
+ else:
+ logger.error("operation %s not found", op)
+ return False
+ # either the op is fully specified (maybe not found)
+ # or we found exactly one op_node
+ if len(op_nodes) == 1:
+ op_node = op_nodes[0]
+ if not node:
+ node = get_status_node_id(op_node)
+ if not node:
+ logger.error("node not found for the operation %s", op)
+ return False
+ if l_int == "-1":
+ l_int = op_node.get("interval")
+ op_op = op_status == "0" and "-i" or "-F"
+ rc = self.inject("%s %s_%s_%s@%s=%s" %
+ (op_op, rsc, l_op, l_int, node, rc_code))
+ if rc != 0:
+ return False
+ key = node + ":" + rsc + ":" + op
+ self.op_changes[key] = "rc=" + rc_code
+ if op_status:
+ self.op_changes[key] += ",op-status=" + op_status
+ self._load(self.origin)
+ self.modified = True
+ return True
+
+
+cib_status = CibStatus()
+
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/cibverify.py b/crmsh/cibverify.py
new file mode 100644
index 0000000..a40dda5
--- /dev/null
+++ b/crmsh/cibverify.py
@@ -0,0 +1,32 @@
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import re
+from .sh import ShellUtils
+from . import log
+
+
+logger = log.setup_logger(__name__)
+cib_verify = "crm_verify -VV -p"
+VALIDATE_RE = re.compile(r"^Entity: line (\d+): element (\w+): " +
+ r"Relax-NG validity error : (.+)$")
+
+
+def _prettify(line, indent=0):
+ m = VALIDATE_RE.match(line)
+ if m:
+ return "%s%s (%s): %s" % (indent*' ', m.group(2), m.group(1), m.group(3))
+ return line
+
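+# Illustrative: a validator line such as
+# Entity: line 12: element nvpair: Relax-NG validity error : Expecting an element
+# is rendered as "nvpair (12): Expecting an element", indented by `indent` spaces.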
+
+def verify(cib):
+ rc, _, stderr = ShellUtils().get_stdout_stderr(cib_verify, cib.encode('utf-8'))
+ for i, line in enumerate(line for line in stderr.split('\n') if line):
+ if i == 0:
+ if "warning:" in line:
+ logger.warning(_prettify(line, 0))
+ else:
+ logger.error(_prettify(line, 0))
+ else:
+ logger.error(_prettify(line, 7))
+ return rc
diff --git a/crmsh/clidisplay.py b/crmsh/clidisplay.py
new file mode 100644
index 0000000..fdcc50d
--- /dev/null
+++ b/crmsh/clidisplay.py
@@ -0,0 +1,129 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+"""
+Display output for various syntax elements.
+"""
+
+from contextlib import contextmanager
+
+from . import config
+
+
+# Enable colors/upcasing
+_pretty = True
+
+
+def enable_pretty():
+ global _pretty
+ _pretty = True
+
+
+def disable_pretty():
+ global _pretty
+ _pretty = False
+
+
+@contextmanager
+def nopretty(cond=True):
+ if cond:
+ disable_pretty()
+ try:
+ yield
+ finally:
+ if cond:
+ enable_pretty()
+
+
+def colors_enabled():
+ return 'color' in config.color.style and _pretty
+
+
+def _colorize(s, colors):
+ if s and colors_enabled():
+ return ''.join(('${%s}' % clr.upper()) for clr in colors) + s + '${NORMAL}'
+ return s
+
+
+def error(s):
+ return _colorize(s, config.color.error)
+
+
+def ok(s):
+ return _colorize(s, config.color.ok)
+
+
+def info(s):
+ return _colorize(s, config.color.info)
+
+
+def warn(s):
+ return _colorize(s, config.color.warn)
+
+
+def keyword(s):
+ if "uppercase" in config.color.style:
+ s = s.upper()
+ if "color" in config.color.style:
+ s = _colorize(s, config.color.keyword)
+ return s
+
+
+def prompt(s):
+ if colors_enabled():
+ s = "${RLIGNOREBEGIN}${GREEN}${BOLD}${RLIGNOREEND}" + s
+ return s + "${RLIGNOREBEGIN}${NORMAL}${RLIGNOREEND}"
+ return s
+
+
+def prompt_noreadline(s):
+ if colors_enabled():
+ return "${GREEN}${BOLD}" + s + "${NORMAL}"
+ return s
+
+
+def help_header(s):
+ return _colorize(s, config.color.help_header)
+
+
+def help_keyword(s):
+ return _colorize(s, config.color.help_keyword)
+
+
+def help_topic(s):
+ return _colorize(s, config.color.help_topic)
+
+
+def help_block(s):
+ return _colorize(s, config.color.help_block)
+
+
+def ident(s):
+ return _colorize(s, config.color.identifier)
+
+
+def attr_name(s):
+ return _colorize(s, config.color.attr_name)
+
+
+def attr_value(s):
+ return _colorize(s, config.color.attr_value)
+
+
+def rscref(s):
+ return _colorize(s, config.color.resource_reference)
+
+
+def idref(s):
+ return _colorize(s, config.color.id_reference)
+
+
+def score(s):
+ return _colorize(s, config.color.score)
+
+
+def ticket(s):
+ return _colorize(s, config.color.ticket)
+
+
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/cliformat.py b/crmsh/cliformat.py
new file mode 100644
index 0000000..15f1241
--- /dev/null
+++ b/crmsh/cliformat.py
@@ -0,0 +1,371 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+from . import constants
+from . import clidisplay
+from . import utils
+from . import xmlutil
+
+
+#
+# CLI format generation utilities (from XML)
+#
+def cli_format(pl, break_lines=True, xml=False):
+ if break_lines and xml:
+ return ' \\\n'.join(pl)
+ elif break_lines:
+ return ' \\\n\t'.join(pl)
+ else:
+ return ' '.join(pl)
+
+
+def head_id_format(nodeid):
+ "Special format for property list / node id"
+ if utils.noquotes(nodeid):
+ return "%s:" % (clidisplay.ident(nodeid))
+ return '%s="%s"' % (clidisplay.ident('$id'),
+ clidisplay.attr_value(nodeid))
+
+
+def quote_wrap(v):
+ if utils.noquotes(v):
+ return v
+ elif '"' in v:
+ return '"%s"' % v.replace('"', '\\"')
+ else:
+ return '"%s"' % v
+
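+# Illustrative (assuming utils.noquotes() accepts plain single tokens):
+# quote_wrap("Stopped") -> Stopped
+# quote_wrap("a b") -> "a b"
+# quote_wrap('say "hi"') -> "say \"hi\""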
+
+def nvpair_format(n, v):
+ if v is None:
+ return clidisplay.attr_name(n)
+ else:
+ return '='.join((clidisplay.attr_name(n),
+ clidisplay.attr_value(quote_wrap(v))))
+
+
+def cli_nvpair(nvp):
+ 'Converts an nvpair tag or a (name, value) pair to CLI syntax'
+ from .cibconfig import cib_factory
+ from .utils import obscured
+ nodeid = nvp.get('id')
+ idref = nvp.get('id-ref')
+ name = nvp.get('name')
+ value = nvp.get('value')
+ value = obscured(name, value)
+ if idref is not None:
+ if name is not None:
+ return '@%s:%s' % (idref, name)
+ return '@%s' % (idref)
+ elif nodeid is not None and cib_factory.is_id_refd(nvp.tag, nodeid):
+ return '$%s:%s' % (nodeid, nvpair_format(name, value))
+ return nvpair_format(name, value)
+
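+# Illustrative renderings (colors and value obscuring elided):
+# <nvpair name="target-role" value="Stopped"/> -> target-role=Stopped
+# <nvpair id-ref="x" name="target-role"/> -> @x:target-role
+# <nvpair id-ref="x"/> -> @x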
+
+def cli_nvpairs(nvplist):
+ 'Return a string of name="value" pairs (passed in a list of nvpairs).'
+ return ' '.join([cli_nvpair(nvp) for nvp in nvplist])
+
+
+def nvpairs2list(node, add_id=False):
+ '''
+ Convert an attribute node to a list of nvpairs.
+ Also converts an id-ref or id into plain nvpairs.
+ The id attribute is normally skipped, since they tend to be
+ long and therefore obscure the relevant content. For some
+ elements, however, they are included (e.g. properties).
+ '''
+ ret = []
+ if 'id-ref' in node.attrib:
+ ret.append(xmlutil.nvpair('$id-ref', node.get('id-ref')))
+ nvpairs = node.xpath('./nvpair | ./attributes/nvpair')
+ if 'id' in node.attrib and (add_id or len(nvpairs) == 0):
+ ret.append(xmlutil.nvpair('$id', node.get('id')))
+ ret.extend(nvpairs)
+ return ret
+
+
+def date_exp2cli(node):
+ kwmap = {'in_range': 'in', 'date_spec': 'spec'}
+ l = []
+ operation = node.get("operation")
+ l.append(clidisplay.keyword("date"))
+ l.append(clidisplay.keyword(kwmap.get(operation, operation)))
+ if operation in utils.olist(constants.simple_date_ops):
+ value = node.get(utils.keyword_cmp(operation, 'lt') and "end" or "start")
+ l.append(clidisplay.attr_value(quote_wrap(value)))
+ else:
+ if operation == 'in_range':
+ for name in constants.in_range_attrs:
+ if name in node.attrib:
+ l.append(nvpair_format(name, node.attrib[name]))
+ for c in node.iterchildren():
+ if c.tag in ("duration", "date_spec"):
+ l.extend([nvpair_format(name, c.get(name))
+ for name in list(c.keys()) if name != 'id'])
+ return ' '.join(l)
+
+
+def binary_op_format(op):
+ l = op.split(':')
+ if len(l) == 2:
+ return "%s:%s" % (l[0], clidisplay.keyword(l[1]))
+ else:
+ return clidisplay.keyword(op)
+
+
+def exp2cli(node):
+ operation = node.get("operation")
+ typ = node.get("type")
+ if typ:
+ operation = "%s:%s" % (typ, operation)
+ attribute = node.get("attribute")
+ value = node.get("value")
+ if not value:
+ return "%s %s" % (binary_op_format(operation), attribute)
+ else:
+ value_source = node.get("value-source")
+ if not value_source or value_source == "literal":
+ return "%s %s %s" % (attribute, binary_op_format(operation), value)
+ else:
+ return "%s %s %s{%s}" % (attribute, binary_op_format(operation), value_source, value)
+
+
+def abs_pos_score(score):
+ return score in ("inf", "+inf", "Mandatory")
+
+
+def get_kind(node):
+ kind = node.get("kind")
+ if not kind:
+ kind = ""
+ return kind
+
+
+def get_score(node):
+ score = node.get("score")
+ if not score:
+ score = node.get("score-attribute")
+ else:
+ if score.find("INFINITY") >= 0:
+ score = score.replace("INFINITY", "inf")
+ if not score:
+ score = ""
+ return score
+
+
+def cli_rule_score(node):
+ score = node.get("score")
+ if score == "INFINITY":
+ return None
+ return get_score(node)
+
+
+def cli_exprs(node):
+ bool_op = node.get("boolean-op")
+ if not bool_op:
+ bool_op = "and"
+ exp = []
+ for c in node.iterchildren():
+ if c.tag == "date_expression":
+ exp.append(date_exp2cli(c))
+ elif c.tag == "expression":
+ exp.append(exp2cli(c))
+ return (" %s " % clidisplay.keyword(bool_op)).join(exp)
+
+
+def cli_rule(node):
+ from .cibconfig import cib_factory
+ s = []
+ node_id = node.get("id")
+ if node_id and cib_factory.is_id_refd(node.tag, node_id):
+ s.append(nvpair_format('$id', node_id))
+ else:
+ idref = node.get("id-ref")
+ if idref:
+ return nvpair_format('$id-ref', idref)
+ rsc_role = node.get("role")
+ if rsc_role:
+ s.append(nvpair_format('$role', rsc_role))
+ score = cli_rule_score(node)
+ if score:
+ s.append("%s:" % (clidisplay.score(score)))
+ s.append(cli_exprs(node))
+ return ' '.join(s)
+
+
+def mkrscrole(node, n):
+ rsc = clidisplay.rscref(node.get(n))
+ rsc_role = node.get(n + "-role")
+ rsc_instance = node.get(n + "-instance")
+ if rsc_role:
+ return "%s:%s" % (rsc, rsc_role)
+ elif rsc_instance:
+ return "%s:%s" % (rsc, rsc_instance)
+ else:
+ return rsc
+
+
+def mkrscaction(node, n):
+ rsc = clidisplay.rscref(node.get(n))
+ rsc_action = node.get(n + "-action")
+ rsc_instance = node.get(n + "-instance")
+ if rsc_action:
+ return "%s:%s" % (rsc, rsc_action)
+ elif rsc_instance:
+ return "%s:%s" % (rsc, rsc_instance)
+ else:
+ return rsc
+
+
+def cli_path(p):
+ return clidisplay.attr_value(quote_wrap(p))
+
+
+def boolean_maybe(v):
+ "returns True/False or None"
+ if v is None:
+ return None
+ return utils.get_boolean(v)
+
+
+def rsc_set_constraint(node, obj_type):
+ col = []
+ cnt = 0
+ for n in node.findall("resource_set"):
+ sequential = boolean_maybe(n.get("sequential"))
+ require_all = boolean_maybe(n.get("require-all"))
+ if require_all is False:
+ col.append("[")
+ elif sequential is False:
+ col.append("(")
+ role = n.get("role")
+ action = n.get("action")
+ for r in n.findall("resource_ref"):
+ rsc = clidisplay.rscref(r.get("id"))
+ q = (obj_type == "order") and action or role
+ col.append(q and "%s:%s" % (rsc, q) or rsc)
+ cnt += 1
+ if require_all is False:
+ if sequential in (None, True):
+ col.append(nvpair_format('sequential', 'true'))
+ col.append("]")
+ elif sequential is False:
+ if require_all is False:
+ col.append(nvpair_format('require-all', 'false'))
+ col.append(")")
+ is_ticket = obj_type == 'rsc_ticket'
+ is_location = obj_type == 'location'
+ is_seq_all = sequential in (None, True) and require_all in (None, True)
+ if not is_location and ((is_seq_all and not is_ticket and cnt <= 2) or
+ (is_ticket and cnt <= 1)): # a degenerate thingie
+ col.insert(0, "_rsc_set_")
+ return col
+
+
+def simple_rsc_constraint(node, obj_type):
+ col = []
+ if obj_type == "colocation":
+ col.append(mkrscrole(node, "rsc"))
+ col.append(mkrscrole(node, "with-rsc"))
+ elif obj_type == "order":
+ col.append(mkrscaction(node, "first"))
+ col.append(mkrscaction(node, "then"))
+ else: # rsc_ticket
+ col.append(mkrscrole(node, "rsc"))
+ return col
+
+
+# this pre (or post)-processing is oversimplified
+# but it will do for now
+# (a shortcut with more than one placeholder in a single expansion
+# cannot have more than one expansion)
+# ("...'@@'...'@@'...","...") <- that won't work
+def build_exp_re(exp_l):
+ return [x.replace(r'@@', r'([a-zA-Z_][a-zA-Z0-9_.-]*)') for x in exp_l]
+
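+# Illustrative: each '@@' placeholder becomes a capturing identifier pattern,
+# e.g. "//primitive[@id='@@']" -> "//primitive[@id='([a-zA-Z_][a-zA-Z0-9_.-]*)']".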
+
+def match_acl_shortcut(xpath, re_l):
+ import re
+ for i in range(len(re_l)):
+ s = ''.join(re_l[0:i+1])
+ r = re.match(s + r"$", xpath)
+ if r:
+ return (True, r.groups()[0:i+1])
+ return (False, None)
+
+
+def find_acl_shortcut(xpath):
+ for shortcut in constants.acl_shortcuts:
+ l = build_exp_re(constants.acl_shortcuts[shortcut])
+ (ec, spec_l) = match_acl_shortcut(xpath, l)
+ if ec:
+ return (shortcut, spec_l)
+ return (None, None)
+
+
+def acl_spec_format(xml_spec, v):
+ key_f = clidisplay.keyword(constants.acl_spec_map[xml_spec])
+ if xml_spec == "xpath":
+ (shortcut, spec_l) = find_acl_shortcut(v)
+ if shortcut:
+ key_f = clidisplay.keyword(shortcut)
+ v_f = ':'.join([clidisplay.attr_value(x) for x in spec_l])
+ else:
+ v_f = clidisplay.attr_value(quote_wrap(v))
+ elif xml_spec == "ref":
+ v_f = '%s' % clidisplay.attr_value(v)
+ else: # tag and attribute
+ v_f = '%s' % clidisplay.attr_value(v)
+ return v_f and '%s:%s' % (key_f, v_f) or key_f
+
+
+def cli_acl_rule(node, format_mode=1):
+ l = []
+ acl_rule_name = node.tag
+ l.append(clidisplay.keyword(acl_rule_name))
+ for xml_spec in constants.acl_spec_map:
+ v = node.get(xml_spec)
+ if v:
+ l.append(acl_spec_format(xml_spec, v))
+ return ' '.join(l)
+
+
+def cli_acl_roleref(node, format_mode=1):
+ return "%s:%s" % (clidisplay.keyword("role"),
+ clidisplay.attr_value(node.get("id")))
+
+
+def cli_acl_role(node):
+ return clidisplay.attr_value(node.get("id"))
+
+
+def cli_acl_spec2_format(xml_spec, v):
+ key_f = clidisplay.keyword(xml_spec)
+ if xml_spec == "xpath":
+ (shortcut, spec_l) = find_acl_shortcut(v)
+ if shortcut:
+ key_f = clidisplay.keyword(shortcut)
+ v_f = ':'.join([clidisplay.attr_value(x) for x in spec_l])
+ else:
+ v_f = clidisplay.attr_value(quote_wrap(v))
+ else: # ref, type and attr
+ v_f = clidisplay.attr_value(v)
+ return v_f and '%s:%s' % (key_f, v_f) or key_f
+
+
+def cli_acl_permission(node):
+ s = [clidisplay.keyword(node.get('kind'))]
+ # if node.get('id'):
+ # s.append(head_id_format(node.get('id')))
+ if node.get('description'):
+ s.append(nvpair_format('description', node.get('description')))
+ for attrname, cliname in constants.acl_spec_map_2_rev:
+ if attrname in node.attrib:
+ s.append(cli_acl_spec2_format(cliname, node.get(attrname)))
+ return ' '.join(s)
+
+#
+################################################################
+
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/cmd_status.py b/crmsh/cmd_status.py
new file mode 100644
index 0000000..4d80a0c
--- /dev/null
+++ b/crmsh/cmd_status.py
@@ -0,0 +1,145 @@
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+import re
+from . import clidisplay
+from . import utils
+from .sh import ShellUtils
+
+_crm_mon = None
+
+_WARNS = ['pending',
+ 'complete',
+ 'Timed Out',
+ 'NOT SUPPORTED',
+ 'Error',
+ 'Not installed',
+ r'UNKNOWN\!',
+ 'Stopped',
+ 'standby',
+ 'WITHOUT quorum']
+_OKS = ['Masters', 'Slaves', 'Started', 'Master', 'Slave', 'Online', 'online', 'ok', 'master',
+ 'with quorum']
+_ERRORS = ['not running',
+ 'unknown error',
+ 'invalid parameter',
+ 'unimplemented feature',
+ 'insufficient privileges',
+ 'not installed',
+ 'not configured',
+ r'master \(failed\)',
+ 'OCF_SIGNAL',
+ 'OCF_NOT_SUPPORTED',
+ 'OCF_TIMEOUT',
+ 'OCF_OTHER_ERROR',
+ 'OCF_DEGRADED',
+ 'OCF_DEGRADED_MASTER',
+ 'unknown',
+ 'Unknown',
+ 'OFFLINE',
+ 'Failed actions']
+
+
+class CrmMonFilter(object):
+ _OK = re.compile(r'(%s)' % '|'.join(r"(?:\b%s\b)" % (w) for w in _OKS))
+ _WARNS = re.compile(r'(%s)' % '|'.join(_WARNS))
+ _ERROR = re.compile(r'(%s)' % ('|'.join(_ERRORS)))
+ _NODES = re.compile(r'(\d+ Nodes configured)')
+ _RESOURCES = re.compile(r'(\d+ Resources configured)')
+
+ _RESOURCE = re.compile(r'(\S+)(\s+)\((\S+:\S+)\):')
+ _GROUP = re.compile(r'((?:Resource Group)|(?:Clone Set)|(?:Master/Slave Set)): (\S+)')
+
+ def _filter(self, line):
+ line = self._RESOURCE.sub("%s%s(%s):" % (clidisplay.help_header(r'\1'),
+ r'\2',
+ r'\3'), line)
+ line = self._NODES.sub(clidisplay.help_header(r'\1'), line)
+ line = self._RESOURCES.sub(clidisplay.help_header(r'\1'), line)
+ line, ngroups = self._GROUP.subn(r'\1: ' + clidisplay.help_header(r'\2'), line)
+ if ngroups == 0:
+ line = self._WARNS.sub(clidisplay.warn(r'\1'), line)
+ line = self._OK.sub(clidisplay.ok(r'\1'), line)
+ line = self._ERROR.sub(clidisplay.error(r'\1'), line)
+ return line
+
+ def __call__(self, text):
+ return '\n'.join([self._filter(line) for line in text.splitlines()]) + '\n'
+
+
+def crm_mon(opts=''):
+ """
+ Run 'crm_mon -1'
+ opts: Additional options to pass to crm_mon
+ returns: rc, stdout, stderr
+ """
+ global _crm_mon
+ shell = ShellUtils()
+ if _crm_mon is None:
+ prog = utils.is_program("crm_mon")
+ if not prog:
+ raise IOError("crm_mon not available, check your installation")
+ _, out = shell.get_stdout("%s --help" % (prog))
+ if "--pending" in out:
+ _crm_mon = "%s -1 -j" % (prog)
+ else:
+ _crm_mon = "%s -1" % (prog)
+
+ status_cmd = "%s %s" % (_crm_mon, opts)
+ return shell.get_stdout_stderr(utils.add_sudo(status_cmd))
+
+
+def cmd_status(args):
+ '''
+ Calls crm_mon -1, passing optional extra arguments.
+ Displays the output, paging if necessary.
+ Raises IOError if crm_mon fails.
+ '''
+ opts = {
+ "bynode": "-n",
+ "inactive": "-r",
+ "ops": "-o",
+ "timing": "-t",
+ "failcounts": "-f",
+ "verbose": "-V",
+ "quiet": "-Q",
+ "html": "--output-as html",
+ "xml": "--output-as xml",
+ "simple": "-s",
+ "tickets": "-c",
+ "noheaders": "-D",
+ "detail": "-R",
+ "brief": "-b",
+ "full": "-ncrft",
+ }
+ extra = ' '.join(opts.get(arg, arg) for arg in args)
+ if not args:
+ extra = "-r"
+ rc, s, err = crm_mon(extra)
+ if rc != 0:
+ raise IOError(f"{err} (rc={rc})")
+
+ utils.page_string(CrmMonFilter()(s))
+ return True
+
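+# Example invocations (an illustrative sketch of the mapping above):
+#
+# cmd_status(["full"]) # runs: crm_mon -1 -ncrft
+# cmd_status([]) # runs: crm_mon -1 -r (also show inactive resources)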
+
+def cmd_verify(args):
+ '''
+ Calls crm_verify -LVVV, then ptest or crm_simulate,
+ to validate the live configuration.
+ '''
+ from . import config
+ if "ptest" in config.core.ptest:
+ cmd1 = "crm_verify -LVVV; %s -L -VVVV" % (config.core.ptest)
+ else:
+ cmd1 = "crm_verify -LVVV; %s -LjV" % (config.core.ptest)
+
+ if "scores" in args:
+ cmd1 += " -s"
+
+ cmd1 = utils.add_sudo(cmd1)
+ rc, s, e = ShellUtils().get_stdout_stderr(cmd1)
+ e = '\n'.join(clidisplay.error(l) for l in e.split('\n')).strip()
+ utils.page_string("\n".join((s, e)))
+ return rc == 0 and not e
diff --git a/crmsh/command.py b/crmsh/command.py
new file mode 100644
index 0000000..35b9142
--- /dev/null
+++ b/crmsh/command.py
@@ -0,0 +1,593 @@
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+# - Base class for UI levels
+# - Decorators and other helper functions for the UI
+# Mostly, what these functions do is store extra metadata
+# inside the functions.
+
+import inspect
+import re
+from . import help as help_module
+from . import ui_utils
+from . import log
+
+
+logger = log.setup_logger(__name__)
+
+
+def name(n):
+ '''
+ Overrides the name of the command.
+ This is useful to handle commands with
+ dashes instead of underscores, or commands
+ with awkward names (like commands with a
+ leading underscore).
+ '''
+ def inner(fn):
+ setattr(fn, '_name', n)
+ return fn
+ return inner
+
+
+def alias(*aliases):
+ '''
+ Adds aliases for the command. The command
+ will also be callable using the alias. The
+ command name set in the command context will
+ reflect the alias used (so the same command can
+ behave differently depending on the alias).
+ '''
+ def inner(fn):
+ setattr(fn, '_aliases', aliases)
+ return fn
+ return inner
+
+
+def level(level_class):
+ '''
+ Changes the command into a level movement.
+ Calling the command doesn't actually call the
+ member function this decorator is applied to, so
+ don't put any code in that function.
+
+ This is a bit awkward, but given how decorators work,
+ it's the best I could think of.
+ '''
+ def inner(fn):
+ # check signature of given level function
+ _check_args(fn, ('self',))
+
+ setattr(fn, '_ui_type', 'level')
+ setattr(fn, '_level', level_class)
+
+ def default(arg, val):
+ if not hasattr(fn, arg):
+ setattr(fn, arg, val)
+
+ default('_aliases', tuple())
+ default('_short_help', None)
+ default('_long_help', None)
+ return fn
+ return inner
+
+
+def help(doc):
+ '''
+ Use to set a help text for a command or level
+ which isn't documented in crm.8.adoc.
+
+ The first line of the doc string will be used as
+ the short help, the rest will be used as the full
+ help message.
+ '''
+ doc_split = doc.split('\n', 1)
+
+ def inner(fn):
+ setattr(fn, '_short_help', doc_split[0])
+ if len(doc_split) > 1:
+ setattr(fn, '_long_help', doc_split[1])
+ else:
+ setattr(fn, '_long_help', '')
+ return fn
+ return inner
+
+
+def skill_level(new_level):
+ '''
+ Use to set the required skill level of a command:
+
+ @command
+ @skill_level('administrator')
+ def do_rmrf(self, cmd, args):
+ ...
+ '''
+ if isinstance(new_level, str):
+ levels = {'operator': 0, 'administrator': 1, 'expert': 2}
+ if new_level.lower() not in levels:
+ raise ValueError("Unknown skill level: " + new_level)
+ new_level = levels[new_level.lower()]
+
+ def inner(fn):
+ setattr(fn, '_skill_level', new_level)
+ return fn
+ return inner
+
+
+def wait(fn):
+ '''
+ A command with this decorator will
+ force the interactive shell to wait
+ for the command to complete.
+
+ @command
+ @wait
+ def do_bigop(self, cmd, args):
+ ...
+ '''
+ setattr(fn, '_wait', True)
+ return fn
+
+
+def completer(cb):
+ '''
+ Use to set a tab completer for the command.
+ The completer is called for the command, regardless
+ of the number of arguments called so far
+ '''
+ def inner(fn):
+ setattr(fn, '_completer', cb)
+ return fn
+ return inner
+
+
+def completers(*fns):
+ '''
+ Use to set a list of positional tab completers for the command.
+ Each completer gets as its argument the command line entered so far,
+ and returns a list of possible completions.
+ '''
+ def cfn(args):
+ nargs = len(args) - 1
+ if nargs == 0:
+ return [args[0]]
+ if nargs <= len(fns):
+ return fns[nargs-1](args)
+ return []
+
+ def inner(fn):
+ setattr(fn, '_completer', cfn)
+ return fn
+ return inner
+
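+# Illustrative usage (a sketch; `do_move`, `resources` and `nodes` are
+# assumed names, not definitions from this module):
+#
+# @command
+# @completers(resources, nodes)
+# def do_move(self, context, rsc, node):
+# ... # first argument completed by resources(), second by nodes()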
+
+def completers_repeating(*fns):
+ '''
+ Like completers, but calls the last completer
+ for any additional arguments
+ '''
+ def cfn(args):
+ nargs = len(args) - 1
+ if nargs == 0:
+ return [args[0]]
+ if nargs <= len(fns):
+ return fns[nargs-1](args)
+ return fns[-1](args)
+
+ def inner(fn):
+ setattr(fn, '_completer', cfn)
+ return fn
+ return inner
+
+
+def _cd_completer(args, context):
+ """
+ more like bash cd completion
+ """
+ def current_completions():
+ return context.current_level().get_completions()
+
+ def is_sublevel(l):
+ return context.current_level().is_sublevel(l)
+
+ def next_completions(token):
+ info = context.current_level().get_child(token)
+ context.enter_level(info.level)
+ return [l for l in current_completions() if is_sublevel(l)]
+
+ def prev_completions():
+ return [l for l in context.previous_level().get_completions()
+ if context.previous_level().is_sublevel(l)]
+
+ if len(args) == 1 and args[0] == 'cd':
+ # complete the 'cd' command itself
+ return current_completions()
+ if len(args) == 2:
+ if args[1] in current_completions():
+ return [args[1] + '/']
+ if args[1] == '..' and context.previous_level():
+ return ['../']
+ if args[1] == '../' and context.previous_level():
+ return [args[1] + l for l in prev_completions()]
+ if args[1].endswith("/"):
+ return [args[1]+l for l in next_completions(args[1].strip('/'))]
+ if re.search(r'\.\./.+', args[1]):
+ return ['../' + l for l in prev_completions()]
+ if re.search(r'.+/.+', args[1]):
+ token = args[1].split('/')[0]
+ return [token+'/'+l for l in next_completions(token)]
+ if len(args) == 3 and not args[-1]:
+ # prevent '..' completion from repeating each time Tab is pressed
+ return
+
+ ret = []
+ if context.previous_level():
+ ret += ['..']
+ # list the sublevels that 'cd' can enter
+ return ret + [l for l in current_completions() if is_sublevel(l)]
+
+
+def _help_completer(args, context):
+ 'TODO: make better completion'
+ return help_module.list_help_topics() + context.current_level().get_completions()
+
+
+def fuzzy_get(items, s):
+ """
+ Finds s in items using a fuzzy
+ matching algorithm:
+
+ 1. if exact match, return value
+ 2. if unique prefix, return value
+ 3. if unique prefix substring, return value
+ """
+ found = items.get(s)
+ if found:
+ return found
+
+ def fuzzy_match(rx):
+ try:
+ matcher = re.compile(rx, re.I)
+ matches = [c
+ for m, c in items.items()
+ if matcher.match(m)]
+ if len(matches) == 1:
+ return matches[0]
+ except re.error as e:
+ raise ValueError(e)
+ return None
+
+ # prefix match
+ m = fuzzy_match(s + '.*')
+ if m:
+ return m
+ # substring match
+ m = fuzzy_match('.*'.join(s) + '.*')
+ if m:
+ return m
+ return None
+
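+# Illustrative behaviour (a sketch, assuming a plain dict of commands):
+#
+# fuzzy_get({'status': 1, 'configure': 2}, 'status') # -> 1 (exact match)
+# fuzzy_get({'status': 1, 'configure': 2}, 'conf') # -> 2 (unique prefix)
+# fuzzy_get({'status': 1, 'configure': 2}, 'cgr') # -> 2 (unique substring)
+# fuzzy_get({'status': 1, 'stop': 2}, 'st') # -> None (ambiguous)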
+
+class UI(object):
+ '''
+ Base class for all ui levels.
+ Things that I need to solve:
+ - Error handling
+ - Help
+ - Completion
+ '''
+
+ # Name of level: override this in the subclass.
+ name = None
+
+ def requires(self):
+ '''
+ Returns False if requirements for level are
+ not met. Checked before entering the level.
+ '''
+ return True
+
+ def end_game(self, no_questions_asked=False):
+ '''
+ Overriding end_game() allows levels to ask
+ for confirmation before exiting.
+ '''
+ pass
+
+ def should_wait(self):
+ '''
+ A kludge to allow in-transit configuration changes to
+ make us wait on transition to finish. Needs to be
+ implemented in the level (currently, just configure).
+ '''
+ return False
+
+ @alias('end', 'back')
+ @help('''Go back to previous level
+Navigates back in the user interface.
+''')
+ def do_up(self, context):
+ '''
+ TODO: Implement full cd navigation. cd ../configure, for example
+ Also implement ls to list commands / levels from current location
+ '''
+ ok = context.up()
+ context.save_stack()
+ return ok
+
+ @help('''List levels and commands
+Lists the available sublevels and commands
+at the current level.
+''')
+ def do_ls(self, context):
+ '''
+ Shows list of places to go and commands to call
+ '''
+ out = []
+ res = []
+ max_width = 16
+ if context.previous_level():
+ out = ['..']
+ out += context.current_level().get_completions()
+ for o in sorted(out):
+ if o.startswith('-') or o.startswith('_'):
+ continue
+ if max_width < len(o):
+ max_width = len(o)
+ res.append(o)
+
+ if max_width >= 16:
+ max_width += 2
+
+ colnum = 3
+ # round up so short listings (fewer than colnum entries) are not lost
+ rownum = (len(res) + colnum - 1) // colnum
+ for i in range(rownum):
+ for x in res[i::rownum]:
+ print("%-*s" % (max_width, x), end=' ')
+ print('')
+
+ @help('''Navigate the level structure
+This command works similarly to how `cd` works in a regular unix
+system shell. `cd ..` returns to the previous level.
+
+If the current level is `resource`, executing `cd ../configure` will
+move directly to the `configure` level.
+
+One difference between this command and the usual behavior of `cd`
+is that without any argument, this command will go back one level
+instead of doing nothing.
+
+Examples:
+....
+ cd ..
+ cd configure
+ cd ../configure
+ cd configure/ra
+....
+''')
+ @completer(_cd_completer)
+ def do_cd(self, context, optarg='..'):
+ ok = True
+ path = optarg.split('/', 1)
+ if len(path) == 1:
+ path = path[0]
+ if path == '..':
+ ok = context.up()
+ elif path == '.' or not path:
+ return ok
+ else:
+ info = context.current_level().get_child(path)
+ if not info or not info.level:
+ logger.debug("children: %s", self._children)
+ context.fatal_error("%s not found in %s" % (path, context.current_level()))
+ context.enter_level(info.level)
+ else:
+ if not self.do_cd(context, path[0]):
+ ok = False
+ if not self.do_cd(context, path[1]):
+ ok = False
+ context.save_stack()
+ return ok
+
+ @alias('bye', 'exit')
+ @help('''Exit the interactive shell
+Terminates `crm` immediately. For some levels, `quit` may
+ask for confirmation before terminating, if there are
+uncommitted changes to the configuration.
+''')
+ def do_quit(self, context):
+ context.quit()
+
+ @alias('?', '-h', '--help')
+ @help('''Show help (use "help topics" for a list of topics)
+The help subsystem consists of the command reference and a list
+of topics. The former is what you need in order to get the
+details regarding a specific command. The latter should help with
+concepts and examples.
+
+Examples:
+....
+ help Introduction
+ help quit
+....
+''')
+ @completer(_help_completer)
+ def do_help(self, context, subject=None, subtopic=None):
+ """usage: help topic|level|command"""
+ h = help_module.help_contextual(context.level_name(), subject, subtopic)
+ h.paginate()
+ context.command_name = ""
+
+ def get_completions(self):
+ '''
+ return tab completions
+ '''
+ return [x for x in self._children.keys() if x not in self._aliases]
+
+ def get_child(self, child):
+ '''
+ Returns child info for the given name, or None
+ if the child is not found.
+
+ This tries very hard to find a matching child:
+ if no exact match is found, a fuzzy matcher is
+ used to pick a close match.
+ '''
+ from . import options
+ if options.shell_completion:
+ return self._children.get(child)
+ else:
+ return fuzzy_get(self._children, child)
+
+ def is_sublevel(self, child):
+ '''
+ True if the given name is a sublevel of this level
+ '''
+ sub = self.get_child(child)
+ return sub and sub.type == 'level'
+
+ @classmethod
+ def children(cls):
+ return cls._children
+
+ @classmethod
+ def init_ui(cls):
+ def get_if_command(attr):
+ "Return the named attribute if it's a command"
+ child = getattr(cls, attr)
+ return child if attr.startswith('do_') and inspect.isfunction(child) else None
+
+ def add_aliases(children, info, aliases):
+ "Add any aliases for command to child map"
+ for alias in info.aliases:
+ aliases.append(alias)
+ children[alias] = info
+
+ def add_help(info):
+ "Add static help to the help system"
+ if info.short_help:
+ entry = help_module.HelpEntry(info.short_help, info.long_help, generated=True)
+ elif info.type == 'command':
+ entry = help_module.HelpEntry(
+ 'Help for command ' + info.name,
+ 'Note: This command is not documented.\n' +
+ 'Usage: %s %s' % (info.name,
+ ui_utils.pretty_arguments(info.function, nskip=2)),
+ generated=True)
+ elif info.type == 'level':
+ entry = help_module.HelpEntry('Help for level ' + info.name,
+ 'Note: This level is not documented.\n',
+ generated=True)
+ if info.type == 'command':
+ help_module.add_help(entry, level=cls.name, command=info.name)
+ elif info.type == 'level':
+ help_module.add_help(entry, level=info.name)
+
+ def prepare(children, child, aliases):
+ info = ChildInfo(child, cls)
+ if info.type == 'command' and not is_valid_command_function(info.function):
+ raise ValueError("Invalid command function: %s.%s" %
+ (cls.__name__, info.function.__name__))
+ children[info.name] = info
+ add_aliases(children, info, aliases)
+ add_help(info)
+
+ children = {}
+ aliases = []
+ for child_name in dir(cls):
+ if child_name == 'do_up' and re.search("ui_root.Root", str(cls)):
+ continue
+ child = get_if_command(child_name)
+ if child:
+ prepare(children, child, aliases)
+ setattr(cls, '_children', children)
+ setattr(cls, '_aliases', aliases)
+ return children
+
+
+def make_name(new_name):
+ '''
+ Generate command name from command function name.
+ '''
+ if new_name.startswith('do_'):
+ return new_name[3:]
+ return new_name
+
+
+class ChildInfo(object):
+ '''
+ Information about a child node in the UI hierarchy:
+ a node is either a level or a command.
+
+ The decorators above store extra metadata as attributes on the
+ command function itself; these are picked up by the UI class and
+ used to generate the ChildInfo data.
+
+ A command method is expected to take a first parameter (after
+ self) which is a UI context. The context holds information about
+ where the user came from when calling the command, controls for
+ manipulating the current level (up(), quit(), etc.), the name
+ used when calling the command, and error reporting and warning
+ methods.
+
+ The rest of the parameters are the actual arguments to the method.
+ These are tokenized using shlex and then matched to the actual
+ arguments of the method.
+ '''
+ def __init__(self, fn, parent):
+ def maybe(attr, default):
+ if hasattr(fn, attr):
+ return getattr(fn, attr)
+ return default
+
+ self.function = fn
+ self.name = maybe('_name', make_name(fn.__name__))
+ self.type = maybe('_ui_type', 'command')
+ self.aliases = maybe('_aliases', tuple())
+ self.short_help = maybe('_short_help', None)
+ self.long_help = maybe('_long_help', None)
+ self.skill_level = maybe('_skill_level', 0)
+ self.wait = maybe('_wait', False)
+ self.level = maybe('_level', None)
+ self.completer = maybe('_completer', None)
+ self.parent = parent
+ self.children = {}
+ if self.type == 'level' and self.level:
+ self.children = self.level.init_ui()
+
+ def complete(self, context, args):
+ '''
+ Execute the completer for this command with the given arguments.
+ The completer mostly completes based on argument position, but
+ some commands are context sensitive.
+ args[0] is set to the name of the command.
+ '''
+ ret = []
+ if self.completer is not None:
+ specs = inspect.getfullargspec(self.completer)
+ if 'context' in specs.args:
+ ret = self.completer([self.name] + args, context)
+ else:
+ ret = self.completer([self.name] + args)
+ return ret
+
+ def __repr__(self):
+ return "%s:%s (%s)" % (self.type, self.name, self.short_help)
+
+
+def is_valid_command_function(fn):
+ '''
+ Returns True if fn is a valid command function:
+ named do_xxx, takes (self, context) as the first two parameters
+ '''
+ specs = inspect.getfullargspec(fn)
+ return len(specs.args) >= 2 and specs.args[0] == 'self' and specs.args[1] == 'context'
+
+
+def _check_args(fn, expected):
+ argnames = fn.__code__.co_varnames[:fn.__code__.co_argcount]
+ if argnames != expected:
+ raise ValueError(fn.__name__ +
+ ": Expected method with signature " + repr(expected))
diff --git a/crmsh/completers.py b/crmsh/completers.py
new file mode 100644
index 0000000..28c576b
--- /dev/null
+++ b/crmsh/completers.py
@@ -0,0 +1,86 @@
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+# Helper completers
+
+from . import xmlutil
+
+
+def choice(lst):
+ '''
+ Static completion from a list
+ '''
+ def completer(args):
+ return lst
+ return completer
+
+
+null = choice([])
+attr_id = choice(["id="])
+
+
+def call(fn, *fnargs):
+ '''
+ Call the given function with the given arguments.
+ The function has to return a list of completions.
+ '''
+ def completer(args):
+ return fn(*fnargs)
+ return completer
+
+
+def join(*fns):
+ '''
+ Combine the output of several completers
+ into a single completer.
+ '''
+ def completer(args):
+ ret = []
+ for fn in fns:
+ ret += list(fn(args))
+ return ret
+ return completer
+
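+# Illustrative combination (a sketch; `nodes` is the completer defined
+# further down in this module):
+#
+# all_or_nodes = join(choice(['--all']), nodes)
+# all_or_nodes(['clear']) # -> ['--all'] plus the current node names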
+
+booleans = choice(['yes', 'no', 'true', 'false', 'on', 'off'])
+
+
+def resources(args=None):
+ cib_el = xmlutil.resources_xml()
+ if cib_el is None:
+ return []
+ nodes = xmlutil.get_interesting_nodes(cib_el, [])
+ rsc_id_list = [x.get("id") for x in nodes if xmlutil.is_resource(x)]
+ if args and args[0] in ['promote', 'demote']:
+ return [item for item in rsc_id_list if xmlutil.RscState().is_ms_or_promotable_clone(item)]
+ if args and args[0] == "started":
+ return [item for item in rsc_id_list if xmlutil.RscState().is_running(item)]
+ if args and args[0] == "stopped":
+ return [item for item in rsc_id_list if not xmlutil.RscState().is_running(item)]
+ return rsc_id_list
+
+
+def resources_started(args=None):
+ return resources(["started"])
+
+
+def resources_stopped(args=None):
+ return resources(["stopped"])
+
+
+def primitives(args):
+ cib_el = xmlutil.resources_xml()
+ if cib_el is None:
+ return []
+ nodes = xmlutil.get_interesting_nodes(cib_el, [])
+ return [x.get("id") for x in nodes if xmlutil.is_primitive(x)]
+
+
+nodes = call(xmlutil.listnodes)
+online_nodes = call(xmlutil.CrmMonXmlParser().get_node_list, "online")
+standby_nodes = call(xmlutil.CrmMonXmlParser().get_node_list, "standby")
+
+shadows = call(xmlutil.listshadows)
+
+status_option = """full bynode inactive ops timing failcounts
+ verbose quiet xml simple tickets noheaders
+ detail brief""".split()
diff --git a/crmsh/config.py b/crmsh/config.py
new file mode 100644
index 0000000..0530ef0
--- /dev/null
+++ b/crmsh/config.py
@@ -0,0 +1,513 @@
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+'''
+Holds user-configurable options.
+'''
+
+import os
+import re
+import sys
+import configparser
+from contextlib import contextmanager
+from typing import List
+from . import userdir
+
+
+@contextmanager
+def _disable_exception_traceback():
+ """
+ All traceback information is suppressed and only the exception type and value are printed
+ """
+ default_value = getattr(sys, "tracebacklimit", 1000) # 1000 is Python's default value
+ sys.tracebacklimit = 0
+ yield
+ sys.tracebacklimit = default_value # revert changes
+
+
+def configure_libdir():
+ '''
+ Returns the candidate library directories.
+ MULTIARCH is a Debian-specific sysconfig variable.
+ '''
+ dirs = ('/usr/lib64', '/usr/libexec', '/usr/lib',
+ '/usr/local/lib64', '/usr/local/libexec', '/usr/local/lib')
+ try:
+ import sysconfig
+ multiarch = sysconfig.get_config_var('MULTIARCH')
+ if multiarch:
+ dirs += ('/usr/lib/%s' % multiarch,
+ '/usr/local/lib/%s' % multiarch)
+ except ImportError:
+ pass
+ return dirs
+
+
+_SYSTEMWIDE = '/etc/crm/crm.conf'
+_PERUSER = os.getenv("CRM_CONFIG_FILE") or os.path.join(userdir.CONFIG_HOME, 'crm.conf')
+
+_PATHLIST = {
+ 'datadir': ('/usr/share', '/usr/local/share', '/opt'),
+ 'cachedir': ('/var/cache', '/opt/cache'),
+ 'libdir': configure_libdir(),
+ 'varlib': ('/var/lib', '/opt/var/lib'),
+ 'wwwdir': ('/srv/www', '/var/www')
+}
+
+
+def make_path(path):
+ """input: path containing %(?)s-statements
+ output: path with no such statements"""
+ m = re.match(r'\%\(([^\)]+)\)(.+)', path)
+ if m:
+ t = m.group(1)
+ for dd in _PATHLIST[t]:
+ if os.path.isdir(path % {t: dd}):
+ return path % {t: dd}
+ return path % {t: _PATHLIST[t][0]}
+ return path
+
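+# Illustrative expansion (a sketch): make_path('%(datadir)s/crmsh') tries
+# each prefix in _PATHLIST['datadir'] and returns e.g. '/usr/share/crmsh'
+# if that directory exists; otherwise the first candidate is the fallback.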
+
+def find_pacemaker_daemons():
+ '''
+ Search for the pacemaker daemon location by trying to find
+ where the daemons are. The control daemon is either
+ pacemaker-controld (2.0+) or crmd depending on the version.
+ '''
+ candidate_dirs = ('{}/pacemaker'.format(d) for d in configure_libdir())
+ for d in candidate_dirs:
+ daemon = '{}/pacemaker-controld'.format(d)
+ if os.path.exists(daemon):
+ return d
+ daemon = '{}/crmd'.format(d)
+ if os.path.exists(daemon):
+ return d
+ return '/usr/lib/pacemaker'
+
+
+# opt_ classes
+# members: default, completions, validate()
+
+class opt_program(object):
+ def __init__(self, envvar, proglist):
+ self.default = ''
+ if envvar and os.getenv(envvar):
+ self.default = os.getenv(envvar)
+ else:
+ for prog in proglist:
+ p = self._find_program(prog)
+ if p is not None:
+ self.default = p
+ break
+ self.completions = proglist
+
+ def _find_program(self, prog):
+ """Is this program available?"""
+ paths = os.getenv("PATH").split(os.pathsep)
+ paths.extend(['/usr/bin', '/usr/sbin', '/bin', '/sbin'])
+ if prog.startswith('/'):
+ filename = make_path(prog)
+ if os.path.isfile(filename) and os.access(filename, os.X_OK):
+ return filename
+ elif prog.startswith('%'):
+ prog = make_path(prog)
+ for p in paths:
+ filename = os.path.join(p, prog)
+ if os.path.isfile(filename) and os.access(filename, os.X_OK):
+ return filename
+ else:
+ for p in paths:
+ filename = make_path(os.path.join(p, prog))
+ if os.path.isfile(filename) and os.access(filename, os.X_OK):
+ return filename
+ return None
+
+ def validate(self, prog):
+ if self._find_program(prog) is None:
+ raise ValueError("%s does not exist or is not a program" % prog)
+
+ def get(self, value):
+ if value.startswith('$'):
+ return os.getenv(value[1:])
+ elif value.startswith('\\$'):
+ return value[1:]
+ return value
+
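+# Illustrative resolution (a sketch): get('$EDITOR') returns the value of
+# the EDITOR environment variable, get('\\$EDITOR') yields the literal
+# string '$EDITOR', and any other value is returned unchanged.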
+
+class opt_string(object):
+ def __init__(self, value):
+ self.default = value
+ self.completions = ()
+
+ def validate(self, val):
+ return True
+
+ def get(self, value):
+ return value
+
+
+class opt_choice(object):
+ def __init__(self, dflt, choices):
+ self.default = dflt
+ self.completions = choices
+
+ def validate(self, val):
+ if val not in self.completions:
+ raise ValueError("%s not in %s" % (val, ', '.join(self.completions)))
+
+ def get(self, value):
+ return value
+
+
+class opt_multichoice(object):
+ def __init__(self, dflt, choices):
+ self.default = dflt
+ self.completions = choices
+
+ def validate(self, val):
+ vals = [x.strip() for x in val.split(',')]
+ for otype in vals:
+ if otype not in self.completions:
+ raise ValueError("%s not in %s" % (val, ', '.join(self.completions)))
+
+ def get(self, value):
+ return value
+
+
+class opt_boolean(object):
+ def __init__(self, dflt):
+ self.default = dflt
+ self.completions = ('yes', 'true', 'on', '1', 'no', 'false', 'off', '0')
+
+ def validate(self, val):
+ if val is True:
+ val = 'true'
+ elif val is False:
+ val = 'false'
+ val = val.lower()
+ if val not in self.completions:
+ raise ValueError("Not a boolean: %s (try one of: %s)" % (
+ val, ', '.join(self.completions)))
+
+ def get(self, value):
+ return value.lower() in ('yes', 'true', 'on', '1')
+
+
+class opt_dir(object):
+ def __init__(self, path):
+ self.default = make_path(path)
+ self.completions = []
+
+ def validate(self, val):
+ if not os.path.isdir(val):
+ raise ValueError("Directory not found: %s" % (val))
+
+ def get(self, value):
+ return value
+
+
+class opt_color(object):
+ def __init__(self, val):
+ self.default = val
+ self.completions = ('black', 'blue', 'green', 'cyan',
+ 'red', 'magenta', 'yellow', 'white',
+ 'bold', 'blink', 'dim', 'reverse',
+ 'underline', 'normal')
+
+ def validate(self, val):
+ for v in val.split(' '):
+ if v not in self.completions:
+ raise ValueError('Invalid color ' + val)
+
+ def get(self, value):
+ return [s.rstrip(',') for s in value.split(' ')] or ['normal']
+
+
+class opt_list(object):
+ def __init__(self, deflist):
+ self.default = ' '.join(deflist)
+ self.completions = deflist
+
+ def validate(self, val):
+ pass
+
+ def get(self, value):
+ return [s.rstrip(',') for s in value.split(' ')]
+
+
+DEFAULTS = {
+ 'core': {
+ 'editor': opt_program('EDITOR', ('vim', 'vi', 'emacs', 'nano')),
+ 'pager': opt_program('PAGER', ('less', 'more', 'pg')),
+ 'user': opt_string(''),
+ 'hosts': opt_list([]), # 'alice@host1, bob@host2'
+ 'no_generating_ssh_key': opt_boolean('no'),
+ 'skill_level': opt_choice('expert', ('operator', 'administrator', 'expert')),
+ 'sort_elements': opt_boolean('yes'),
+ 'check_frequency': opt_choice('always', ('always', 'on-verify', 'never')),
+ 'check_mode': opt_choice('strict', ('strict', 'relaxed')),
+ 'wait': opt_boolean('no'),
+ 'add_quotes': opt_boolean('yes'),
+ 'manage_children': opt_choice('ask', ('ask', 'never', 'always')),
+ 'force': opt_boolean('no'),
+ 'debug': opt_boolean('no'),
+ 'ptest': opt_program('', ('ptest', 'crm_simulate')),
+ 'dotty': opt_program('', ('dotty',)),
+ 'dot': opt_program('', ('dot',)),
+ 'ignore_missing_metadata': opt_boolean('no'),
+ 'report_tool_options': opt_string(''),
+ 'lock_timeout': opt_string('120'),
+ 'OCF_1_1_SUPPORT': opt_boolean('no'),
+ 'obscure_pattern': opt_string('passw*')
+ },
+ 'path': {
+ 'sharedir': opt_dir('%(datadir)s/crmsh'),
+ 'cache': opt_dir('%(cachedir)s/crm'),
+ 'crm_config': opt_dir('%(varlib)s/pacemaker/cib'),
+ 'crm_daemon_dir': opt_dir(find_pacemaker_daemons()),
+ 'crm_daemon_user': opt_string('hacluster'),
+ 'ocf_root': opt_dir('%(libdir)s/ocf'),
+ 'crm_dtd_dir': opt_dir('%(datadir)s/pacemaker'),
+ 'pe_state_dir': opt_dir('%(varlib)s/pacemaker/pengine'),
+ 'heartbeat_dir': opt_dir('%(varlib)s/heartbeat'),
+ 'hb_delnode': opt_program('', ('%(datadir)s/heartbeat/hb_delnode',)),
+ 'nagios_plugins': opt_dir('%(libdir)s/nagios/plugins'),
+ 'hawk_wizards': opt_dir('%(wwwdir)s/hawk/config/wizard'),
+ },
+ 'color': {
+ 'style': opt_multichoice('color', ('plain', 'color-always', 'color', 'uppercase')),
+ 'error': opt_color('red bold'),
+ 'ok': opt_color('green bold'),
+ 'warn': opt_color('yellow bold'),
+ 'info': opt_color('cyan'),
+ 'help_keyword': opt_color('blue bold underline'),
+ 'help_header': opt_color('normal bold'),
+ 'help_topic': opt_color('yellow bold'),
+ 'help_block': opt_color('cyan'),
+ 'keyword': opt_color('yellow'),
+ 'identifier': opt_color('normal'),
+ 'attr_name': opt_color('cyan'),
+ 'attr_value': opt_color('red'),
+ 'resource_reference': opt_color('green'),
+ 'id_reference': opt_color('green'),
+ 'score': opt_color('magenta'),
+ 'ticket': opt_color('magenta'),
+ },
+ 'report': {
+ 'from_time': opt_string('-12H'),
+ 'compress': opt_boolean('yes'),
+ 'speed_up': opt_boolean('no'),
+ 'collect_extra_logs': opt_string('/var/log/messages \
+ /var/log/crmsh/crmsh.log /etc/crm/profiles.yml /etc/crm/crm.conf'),
+ 'remove_exist_dest': opt_boolean('no'),
+ 'single_node': opt_boolean('no'),
+ 'sanitize_rule': opt_string('passw.*'),
+ 'verbosity': opt_string('0')
+ }
+}
+
+_parser = None
+
+
+def _stringify(val):
+ if val is True:
+ return 'true'
+ elif val is False:
+ return 'false'
+ elif isinstance(val, str):
+ return val
+ else:
+ return str(val)
+
+
+class _Configuration(object):
+ def __init__(self):
+ self._defaults = None
+ self._systemwide = None
+ self._user = None
+
+ def _safe_read(self, config_parser_inst, file_list):
+ """
+ Try to handle configparser.MissingSectionHeaderError while reading
+ """
+ try:
+ config_parser_inst.read(file_list)
+ except configparser.MissingSectionHeaderError:
+ with _disable_exception_traceback():
+ raise
+
+ def load(self):
+ self._defaults = configparser.ConfigParser()
+ for section, keys in DEFAULTS.items():
+ self._defaults.add_section(section)
+ for key, opt in keys.items():
+ self._defaults.set(section, key, opt.default)
+
+ if os.path.isfile(_SYSTEMWIDE):
+ self._systemwide = configparser.ConfigParser()
+ self._safe_read(self._systemwide, [_SYSTEMWIDE])
+ # for backwards compatibility with <=2.1.1 due to ridiculous bug
+ elif os.path.isfile("/etc/crm/crmsh.conf"):
+ self._systemwide = configparser.ConfigParser()
+ self._safe_read(self._systemwide, ["/etc/crm/crmsh.conf"])
+ if os.path.isfile(_PERUSER):
+ self._user = configparser.ConfigParser()
+ self._safe_read(self._user, [_PERUSER])
+
+ def save(self):
+ if self._user:
+ if not os.path.isdir(os.path.dirname(_PERUSER)):
+ os.makedirs(os.path.dirname(_PERUSER))
+ fp = open(_PERUSER, 'w')
+ self._user.write(fp)
+ fp.close()
+
+ def get_impl(self, section, name):
+ try:
+ if self._user and self._user.has_option(section, name):
+ return self._user.get(section, name) or ''
+ if self._systemwide and self._systemwide.has_option(section, name):
+ return self._systemwide.get(section, name) or ''
+ return self._defaults.get(section, name) or ''
+ except configparser.NoOptionError as e:
+ raise ValueError(e)
+
+ def get(self, section, name, raw=False):
+ if raw:
+ return self.get_impl(section, name)
+ return DEFAULTS[section][name].get(self.get_impl(section, name))
+
+ def set(self, section, name, value):
+ if section not in ('core', 'path', 'color', 'report'):
+ raise ValueError("Setting invalid section " + str(section))
+ if not self._defaults.has_option(section, name):
+ raise ValueError("Setting invalid option %s.%s" % (section, name))
+ DEFAULTS[section][name].validate(value)
+ if self._user is None:
+ self._user = configparser.ConfigParser()
+ if not self._user.has_section(section):
+ self._user.add_section(section)
+ self._user.set(section, name, _stringify(value))
+
+ def items(self, section):
+ return [(k, self.get(section, k)) for k, _ in self._defaults.items(section)]
+
+ def configured_keys(self, section):
+ ret = []
+ if self._systemwide and self._systemwide.has_section(section):
+ ret += self._systemwide.options(section)
+ if self._user and self._user.has_section(section):
+ ret += self._user.options(section)
+ return list(set(ret))
+
+ def reset(self):
+ '''reset to what is on disk'''
+ self._user = configparser.ConfigParser()
+ self._user.read([_PERUSER])
+
+
+_configuration = _Configuration()
+
+
+class _Section(object):
+ def __init__(self, section):
+ object.__setattr__(self, 'section', section)
+
+ def __getattr__(self, name):
+ return _configuration.get(self.section, name)
+
+ def __setattr__(self, name, value):
+ _configuration.set(self.section, name, value)
+
+ def items(self):
+ return _configuration.items(self.section)
+
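+# Typical access pattern (an illustrative sketch): after load(), each
+# section behaves like a plain object backed by the merged configuration:
+#
+# from crmsh import config
+# config.core.editor # -> the resolved editor program
+# config.core.debug = 'yes' # validated by opt_boolean, kept in the
+# # per-user parser until save() is called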
+
+def load():
+ _configuration.load()
+
+ os.environ["OCF_ROOT"] = _configuration.get('path', 'ocf_root')
+
+
+def save():
+ '''
+ Only save options that are not default
+ '''
+ _configuration.save()
+
+
+def set_option(section, option, value):
+ if not isinstance(value, List):
+ _configuration.set(section, option, value)
+ return
+ string = ""
+ first = True
+ for item in value:
+ if first:
+ first = False
+ else:
+ string += ", "
+ string += str(item)
+ _configuration.set(section, option, string)
+
+
+def get_option(section, option, raw=False):
+ '''
+ Return the given option.
+ If raw is True, return the configured value.
+ Example: for a boolean, returns "yes", not True
+ '''
+ return _configuration.get(section, option, raw=raw)
+
+
+def get_all_options():
+ '''Returns a list of all configurable options'''
+ ret = []
+ for sname, section in DEFAULTS.items():
+ ret += ['%s.%s' % (sname, option) for option in list(section.keys())]
+ return sorted(ret)
+
+
+def get_configured_options():
+ '''Returns a list of all options that have a non-default value'''
+ ret = []
+ for sname in DEFAULTS:
+ for key in _configuration.configured_keys(sname):
+ ret.append('%s.%s' % (sname, key))
+ return ret
+
+
+def complete(section, option):
+ s = DEFAULTS.get(section)
+ if not s:
+ return []
+ o = s.get(option)
+ if not o:
+ return []
+ return o.completions
+
+
+def has_user_config():
+ return os.path.isfile(_PERUSER)
+
+
+def reset():
+ _configuration.reset()
+
+
+load()
+core = _Section('core')
+path = _Section('path')
+color = _Section('color')
+report = _Section('report')
+
+
+def load_version():
+ version = 'dev'
+ versioninfo_file = os.path.join(path.sharedir, 'version')
+ if os.path.isfile(versioninfo_file):
+ with open(versioninfo_file) as f:
+ version = f.readline().strip() or version
+ return version
+
+
+VERSION = load_version()
+CRM_VERSION = str(VERSION)
diff --git a/crmsh/constants.py b/crmsh/constants.py
new file mode 100644
index 0000000..e101bc7
--- /dev/null
+++ b/crmsh/constants.py
@@ -0,0 +1,538 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+from .ordereddict import odict
+
+
+# All keywords introduced in the CIB language,
+# mapped to the kind of token they represent.
+keywords = {
+ "node": "element",
+ "primitive": "element",
+ "resource": "element",
+ "group": "element",
+ "bundle": "element",
+ "clone": "element",
+ "ms": "element",
+ "master": "element",
+ "location": "element",
+ "colocation": "element",
+ "collocation": "element",
+ "order": "element",
+ "rsc_ticket": "element",
+ "rsc_template": "element",
+ "property": "element",
+ "rsc_defaults": "element",
+ "op_defaults": "element",
+ "acl_target": "element",
+ "acl_group": "element",
+ "user": "element",
+ "role": "element",
+ "fencing_topology": "element",
+ "fencing-topology": "element",
+ "tag": "element",
+ "alert": "element",
+ "monitor": "element",
+ "params": "subelement",
+ "meta": "subelement",
+ "attributes": "subelement",
+ "utilization": "subelement",
+ "operations": "subelement",
+ "op": "subelement",
+ "rule": "subelement",
+ "to": "subelement",
+ "inf": "value",
+ "INFINITY": "value",
+ "and": "op",
+ "or": "op",
+ "lt": "op",
+ "gt": "op",
+ "lte": "op",
+ "gte": "op",
+ "eq": "op",
+ "ne": "op",
+ "defined": "op",
+ "not_defined": "op",
+ "in_range": "op",
+ "in": "op",
+ "date_spec": "op",
+ "spec": "op",
+ "date": "value",
+ "yes": "value",
+ "no": "value",
+ "true": "value",
+ "false": "value",
+ "on": "value",
+ "off": "value",
+ "normal": "value",
+ "member": "value",
+ "ping": "value",
+ "remote": "value",
+ "start": "value",
+ "stop": "value",
+ "Mandatory": "value",
+ "Optional": "value",
+ "Serialize": "value",
+ "ref": "value",
+ "xpath": "value",
+ "xml": "element",
+}
+
+cib_cli_map = {
+ "node": "node",
+ "primitive": "primitive",
+ "group": "group",
+ "clone": "clone",
+ "master": "ms",
+ "bundle": "bundle",
+ "rsc_location": "location",
+ "rsc_colocation": "colocation",
+ "rsc_order": "order",
+ "rsc_ticket": "rsc_ticket",
+ "template": "rsc_template",
+ "cluster_property_set": "property",
+ "rsc_defaults": "rsc_defaults",
+ "op_defaults": "op_defaults",
+ "acl_target": "acl_target",
+ "acl_group": "acl_group",
+ "acl_user": "user",
+ "acl_role": "role",
+ "fencing-topology": "fencing_topology",
+ "tag": "tag",
+ "alert": "alert",
+}
+container_tags = ("group", "clone", "ms", "master", "bundle")
+clonems_tags = ("clone", "ms", "master")
+resource_tags = ("primitive", "group", "clone", "ms", "master", "template", "bundle")
+constraint_tags = ("rsc_location", "rsc_colocation", "rsc_order", "rsc_ticket")
+constraint_rsc_refs = ("rsc", "with-rsc", "first", "then")
+children_tags = ("group", "primitive")
+nvpairs_tags = ("meta_attributes", "instance_attributes", "utilization")
+defaults_tags = ("rsc_defaults", "op_defaults")
+resource_cli_names = ("primitive", "group", "clone", "ms", "master", "rsc_template", "bundle")
+constraint_cli_names = ("location", "colocation", "collocation", "order", "rsc_ticket")
+nvset_cli_names = ("property", "rsc_defaults", "op_defaults")
+op_cli_names = ("monitor",
+ "start",
+ "stop",
+ "migrate_to",
+ "migrate_from",
+ "promote",
+ "demote",
+ "notify",
+ "reload")
+ra_operations = tuple(["probe"] + list(op_cli_names))
+
+subpfx_list = {
+ "instance_attributes": "instance_attributes",
+ "meta_attributes": "meta_attributes",
+ "utilization": "utilization",
+ "operations": "ops",
+ "rule": "rule",
+ "expression": "expression",
+ "date_expression": "expression",
+ "duration": "duration",
+ "date_spec": "date_spec",
+ "read": "read",
+ "write": "write",
+ "deny": "deny",
+}
+acl_rule_names = ("read", "write", "deny")
+acl_spec_map = odict({
+ "xpath": "xpath",
+ "ref": "ref",
+ "tag": "tag",
+ "attribute": "attribute",
+})
+# ACLs were rewritten in pacemaker 1.1.12
+# this is the new acl syntax
+acl_spec_map_2 = odict({
+ "xpath": "xpath",
+ "ref": "reference",
+ "reference": "reference",
+ "tag": "object-type",
+ "type": "object-type",
+ "attr": "attribute",
+ "attribute": "attribute"
+})
+
+acl_spec_map_2_rev = (('xpath', 'xpath'),
+ ('reference', 'ref'),
+ ('attribute', 'attr'),
+ ('object-type', 'type'))
+
+acl_shortcuts = {
+ "meta":
+ (r"//primitive\[@id='@@'\]/meta_attributes", r"/nvpair\[@name='@@'\]"),
+ "params":
+ (r"//primitive\[@id='@@'\]/instance_attributes", r"/nvpair\[@name='@@'\]"),
+ "utilization":
+ (r"//primitive\[@id='@@'\]/utilization",),
+ "location":
+ (r"//rsc_location\[@id='cli-prefer-@@' and @rsc='@@'\]",),
+ "property":
+ (r"//crm_config/cluster_property_set", r"/nvpair\[@name='@@'\]"),
+ "nodeattr":
+ (r"//nodes/node/instance_attributes", r"/nvpair\[@name='@@'\]"),
+ "nodeutil":
+ (r"//nodes/node/utilization", r"\[@uname='@@'\]"),
+ "node":
+ (r"//nodes/node", r"\[@uname='@@'\]"),
+ "status":
+ (r"/cib/status",),
+ "cib":
+ (r"/cib",),
+}
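+# Illustrative expansion (a sketch): the ACL CLI spec meta:r1:target-role
+# uses the "meta" shortcut, with each '@@' filled in from the spec, matching
+# //primitive[@id='r1']/meta_attributes/nvpair[@name='target-role']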
+lrm_exit_codes = {
+ "success": "0",
+ "unknown": "1",
+ "args": "2",
+ "unimplemented": "3",
+ "perm": "4",
+ "installed": "5",
+ "configured": "6",
+ "not_running": "7",
+ "master": "8",
+ "failed_master": "9",
+}
+lrm_status_codes = {
+ "pending": "-1",
+ "done": "0",
+ "cancelled": "1",
+ "timeout": "2",
+ "notsupported": "3",
+ "error": "4",
+}
+cib_user_attrs = ("validate-with",)
+node_states = ("online", "offline", "unclean")
+precious_attrs = ("id-ref",)
+op_extra_attrs = ("interval",)
+rsc_meta_attributes = (
+ "allow-migrate", "maintenance", "is-managed", "interval-origin",
+ "migration-threshold", "priority", "multiple-active",
+ "failure-timeout", "resource-stickiness", "target-role",
+ "restart-type", "description", "remote-node", "requires",
+ "provides", "remote-port", "remote-addr", "remote-connect-timeout",
+ "critical", "allow-unhealthy-nodes", "container-attribute-target"
+)
+common_meta_attributes = ("priority", "target-role", "is-managed")
+group_meta_attributes = common_meta_attributes + ("container", )
+clone_meta_attributes = common_meta_attributes + (
+ "ordered", "notify", "interleave", "globally-unique",
+ "clone-max", "clone-node-max", "clone-state", "description",
+ "clone-min", "promotable", "promoted-max", "promoted-node-max",
+)
+ms_meta_attributes = common_meta_attributes + (
+ "clone-max", "clone-node-max", "notify", "globally-unique", "ordered",
+ "interleave", "master-max", "master-node-max", "description",
+)
+bundle_meta_attributes = common_meta_attributes
+alert_meta_attributes = (
+ "timeout", "timestamp-format"
+)
+trace_ra_attr = "trace_ra"
+trace_dir_attr = "trace_dir"
+score_types = {'advisory': '0', 'mandatory': 'INFINITY'}
+boolean_ops = ('or', 'and')
+binary_ops = ('lt', 'gt', 'lte', 'gte', 'eq', 'ne')
+binary_types = ('string', 'version', 'number')
+unary_ops = ('defined', 'not_defined')
+simple_date_ops = ('lt', 'gt')
+date_ops = ('lt', 'gt', 'in_range', 'date_spec')
+date_spec_names = '''hours monthdays weekdays yeardays months \
+weeks years weekyears moon'''.split()
+in_range_attrs = ('start', 'end')
+node_default_type = "normal"
+node_attributes_keyw = ("attributes", "utilization")
+shadow_envvar = "CIB_shadow"
+attr_defaults = {
+ "node": {"type": "normal"},
+ "resource_set": {"sequential": "true", "require-all": "true"},
+ "rule": {"boolean-op": "and"},
+}
+cib_no_section_rc = 6
+# Graphviz attributes for various CIB elements.
+# Shared for edge and node and graph attributes.
+# Keys are graphviz attributes, values are dicts where keys
+# are CIB element names and values graphviz values.
+# - element "." refers to the whole graph
+# - element "class:<ra_class>" refers to primitives of a
+# specific RA class
+# - optional_set is a resource_set with require-all set to
+# false
+# - group and optional_set are subgraphs (boxes)
+graph = {
+ ".": {
+ "compound": "true",
+ },
+ "*": {
+ "fontname": "Helvetica",
+ "fontsize": "11",
+ },
+ "node": {
+ "style": "bold",
+ "shape": "box",
+ "color": "#7ac142",
+ },
+ "primitive": {
+ "fillcolor": "#e4e5e6",
+ "color": "#b9b9b9",
+ "shape": "box",
+ "style": "rounded,filled",
+ },
+ "rsc_template": {
+ "fillcolor": "#ffd457",
+ "color": "#b9b9b9",
+ "shape": "box",
+ "style": "rounded,filled,dashed",
+ },
+ "class:stonith": {
+ "shape": "box",
+ "style": "dashed",
+ },
+ "location": {
+ "style": "dashed",
+ "dir": "none",
+ },
+ "clone": {
+ "color": "#ec008c",
+ },
+ "ms": {
+ "color": "#f8981d",
+ },
+ "bundle": {
+ "color": "#00aeef",
+ "style": "rounded",
+ },
+ "group": {
+ "color": "#00aeef",
+ "group": "#00aeef",
+ "labelloc": "b",
+ "labeljust": "r",
+ "labelfontsize": "12",
+ },
+ "optional_set": {
+ "style": "dotted",
+ },
+ "template:edge": {
+ "color": "#b9b9b9",
+ "style": "dotted",
+ "arrowtail": "open",
+ "dir": "back",
+ },
+}
+
+need_reset = False
+prompt = ''
+tmp_cib = False
+tmp_cib_prompt = "@tmp@"
+live_cib_prompt = "live"
+
+simulate_programs = {
+ "ptest": "ptest",
+ "simulate": "crm_simulate",
+}
+
+meta_progs = ("crmd", "pengine", "stonithd", "cib")
+meta_progs_20 = ("pacemaker-controld", "pacemaker-schedulerd", "pacemaker-fenced", "pacemaker-based")
+
+# elide these properties from tab completion
+crmd_metadata_do_not_complete = ("dc-version",
+ "cluster-infrastructure",
+ "crmd-integration-timeout",
+ "crmd-finalization-timeout",
+ "expected-quorum-votes")
+extra_cluster_properties = ("dc-version",
+ "cluster-infrastructure",
+ "last-lrm-refresh",
+ "cluster-name")
+pcmk_version = "" # set later
+
+container_type = ["docker", "rkt"]
+container_helptxt = {
+ "docker": {
+ "image": """image:(string)
+ Docker image tag(required)""",
+
+ "replicas": """replicas:(integer)
+ Default:Value of masters if that is positive, else 1
+ A positive integer specifying the number of container instances to launch""",
+
+ "replicas-per-host": """replicas-per-host:(integer)
+ Default:1
+ A positive integer specifying the number of container instances allowed to
+ run on a single node""",
+
+ "masters": """masters:(integer)
+ Default:0
+ A non-negative integer that, if positive, indicates that the containerized
+ service should be treated as a multistate service, with this many replicas
+ allowed to run the service in the master role""",
+
+ "run-command": """run-command:(string)
+ Default:/usr/sbin/pacemaker_remoted if bundle contains a primitive, otherwise none
+ This command will be run inside the container when launching it ("PID 1").
+ If the bundle contains a primitive, this command must start pacemaker_remoted
+ (but could, for example, be a script that does other stuff, too).""",
+
+ "options": """options:(string)
+ Extra command-line options to pass to docker run"""
+ },
+
+ "network": {
+ "ip-range-start": """ip-range-start:(IPv4 address)
+ If specified, Pacemaker will create an implicit ocf:heartbeat:IPaddr2 resource
+ for each container instance, starting with this IP address, using up to replicas
+ sequential addresses. These addresses can be used from the host’s network to
+ reach the service inside the container, though it is not visible within the
+ container itself. Only IPv4 addresses are currently supported.""",
+
+ "host-netmask": """host-netmask:(integer)
+ Default:32
+ If ip-range-start is specified, the IP addresses are created with this CIDR
+ netmask (as a number of bits).""",
+
+ "host-interface": """host-interface:(string)
+ If ip-range-start is specified, the IP addresses are created on this host
+ interface (by default, it will be determined from the IP address).""",
+
+ "control-port": """control-port:(integer)
+ Default: 3121
+ If the bundle contains a primitive, the cluster will use this integer TCP port
+ for communication with Pacemaker Remote inside the container. Changing this is
+ useful when the container is unable to listen on the default port, for example,
+ when the container uses the host’s network rather than ip-range-start (in which
+ case replicas-per-host must be 1), or when the bundle may run on a Pacemaker
+ Remote node that is already listening on the default port. Any PCMK_remote_port
+ environment variable set on the host or in the container is ignored for bundle
+ connections.""",
+
+ "port-mapping": {
+ "id": """id:(string)
+ A unique name for the port mapping (required)""",
+
+ "port": """port:(integer)
+ If this is specified, connections to this TCP port number on the host network
+ (on the container’s assigned IP address, if ip-range-start is specified) will
+ be forwarded to the container network. Exactly one of port or range must be
+ specified in a port-mapping.""",
+
+ "internal-port": """internal-port:(integer)
+ Default: value of port
+ If port and this are specified, connections to port on the host’s network will
+ be forwarded to this port on the container network.""",
+
+ "range": """range:(first_port-last_port)
+ If this is specified, connections to these TCP port numbers (expressed as
+ first_port-last_port) on the host network (on the container’s assigned IP address,
+ if ip-range-start is specified) will be forwarded to the same ports in the container
+ network. Exactly one of port or range must be specified in a port-mapping."""
+ }
+ },
+
+ "storage": {
+ "id": """id:(string)
+ A unique name for the storage mapping (required)""",
+
+ "source-dir": """source-dir:(string)
+ The absolute path on the host’s filesystem that will be mapped into the container.
+ Exactly one of source-dir and source-dir-root must be specified in a storage-mapping.""",
+
+ "source-dir-root": """source-dir-root:(string)
+ The start of a path on the host’s filesystem that will be mapped into the container,
+ using a different subdirectory on the host for each container instance. The subdirectory
+ will be named the same as the bundle host name, as described in the note for ip-range-start.
+ Exactly one of source-dir and source-dir-root must be specified in a storage-mapping.""",
+
+ "target-dir": """target-dir:(string)
+ The path name within the container where the host storage will be mapped (required)""",
+
+ "options": """options:(string)
+ File system mount options to use when mapping the storage"""
+ },
+
+ "rkt": {
+ "image": """image:(string)
+ Container image tag (required)""",
+
+ "replicas": """replicas:(integer)
+ Default:Value of masters if that is positive, else 1
+ A positive integer specifying the number of container instances to launch""",
+
+ "replicas-per-host": """replicas-per-host:(interval)
+ Default:1
+ A positive integer specifying the number of container instances allowed to
+ run on a single node""",
+
+ "masters": """masters:(integer)
+ Default:0
+ A non-negative integer that, if positive, indicates that the containerized
+ service should be treated as a multistate service, with this many replicas
+ allowed to run the service in the master role""",
+
+ "run-command": """run-command:(string)
+ Default:/usr/sbin/pacemaker_remoted if bundle contains a primitive, otherwise none
+ This command will be run inside the container when launching it ("PID 1").
+ If the bundle contains a primitive, this command must start pacemaker_remoted
+ (but could, for example, be a script that does other stuff, too).""",
+
+ "options": """options:(string)
+ Extra command-line options to pass to rkt run"""
+ }
+}
+
+
+QDEVICE_HELP_INFO = """ QDevice participates in quorum decisions. With the assistance of
+ a third-party arbitrator Qnetd, it provides votes so that a cluster
+ is able to sustain more node failures than standard quorum rules
+ allow. It is recommended for clusters with an even number of nodes
+ and highly recommended for 2 node clusters."""
+
+
+SSH_OPTION_ARGS = ["-o", "StrictHostKeyChecking=no"]
+SSH_OPTION = ' '.join(SSH_OPTION_ARGS)
+
+
+CLOUD_AWS = "amazon-web-services"
+CLOUD_AZURE = "microsoft-azure"
+CLOUD_GCP = "google-cloud-platform"
+
+
+RED = '\033[31m'
+YELLOW = '\033[33m'
+GREEN = '\033[32m'
+END = '\033[0m'
+
+
+CIB_QUERY = "cibadmin -Q"
+CIB_UPGRADE = "crm configure upgrade force"
+CIB_RAW_FILE = "/var/lib/pacemaker/cib/cib.xml"
+XML_NODE_PATH = "/cib/configuration/nodes/node"
+XML_STATUS_PATH = "/cib/status/node_state"
+XML_NODE_QUERY_STANDBY_PATH = "//nodes/node[@id='{node_id}']/instance_attributes/nvpair[@name='standby']"
+XML_STATUS_QUERY_STANDBY_PATH = "//status/node_state[@id='{node_id}']/transient_attributes/instance_attributes/nvpair[@name='standby']"
+CRM_MON_ONE_SHOT = "crm_mon -1"
+CRM_MON_XML_OUTPUT= "crm_mon --output-as=xml"
+STONITH_TIMEOUT_DEFAULT = 60
+PCMK_DELAY_MAX = 30
+DLM_CONTROLD_RA = "ocf::pacemaker:controld"
+LVMLOCKD_RA = "ocf::heartbeat:lvmlockd"
+HA_USER = "hacluster"
+HA_GROUP = "haclient"
+SCHEMA_MIN_VER_SUPPORT_OCF_1_1 = "pacemaker-3.7"
+REJOIN_COUNT = 60
+REJOIN_INTERVAL = 10
+DC_DEADTIME_DEFAULT = 20
+
+ADVISED_ACTION_LIST = ['monitor', 'start', 'stop', 'promote', 'demote']
+ADVISED_KEY_LIST = ['timeout', 'interval', 'role']
+DEFAULT_INTERVAL_IN_ACTION = "20s"
+WAIT_TIMEOUT_MS_DEFAULT = 120000
+CSYNC2_SERVICE = "csync2.socket"
+
+RSC_ROLE_PROMOTED = "Promoted"
+RSC_ROLE_UNPROMOTED = "Unpromoted"
+RSC_ROLE_PROMOTED_LEGACY = "Master"
+RSC_ROLE_UNPROMOTED_LEGACY = "Slave"
+PCMK_VERSION_DEFAULT = "2.0.0"
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/corosync.py b/crmsh/corosync.py
new file mode 100644
index 0000000..71d38b2
--- /dev/null
+++ b/crmsh/corosync.py
@@ -0,0 +1,784 @@
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+'''
+Functions that abstract creating and editing the corosync.conf
+configuration file, and wrappers around the corosync-* utilities.
+'''
+
+import os
+import re
+import socket
+
+from . import utils, sh
+from . import tmpfiles
+from . import parallax
+from . import log
+from .sh import ShellUtils
+
+
+logger = log.setup_logger(__name__)
+
+
+COROSYNC_TOKEN_DEFAULT = 1000 # in ms units
+
+
+def conf():
+ return os.getenv('COROSYNC_MAIN_CONFIG_FILE', '/etc/corosync/corosync.conf')
+
+
+def check_tools():
+ return all(utils.is_program(p)
+ for p in ['corosync-cfgtool', 'corosync-quorumtool', 'corosync-cmapctl'])
+
+
+def cfgtool(*args):
+ return ShellUtils().get_stdout(['corosync-cfgtool'] + list(args), shell=False)
+
+
+def query_status(status_type):
+ """
+ Query status of corosync
+
+ Possible types could be ring/quorum/qdevice/qnetd
+ """
+ status_func_dict = {
+ "ring": query_ring_status,
+ "quorum": query_quorum_status,
+ "qdevice": query_qdevice_status,
+ "qnetd": query_qnetd_status
+ }
+ if status_type in status_func_dict:
+ status_func_dict[status_type]()
+ else:
+ raise ValueError("Wrong type \"{}\" to query status".format(status_type))
+
+
+def query_ring_status():
+ """
+ Query corosync ring status
+ """
+ rc, out, err = ShellUtils().get_stdout_stderr("corosync-cfgtool -s")
+ if rc != 0 and err:
+ raise ValueError(err)
+ if out:
+ print(out)
+
+
+def query_quorum_status():
+ """
+ Query corosync quorum status
+
+ """
+ utils.print_cluster_nodes()
+ rc, out, err = ShellUtils().get_stdout_stderr("corosync-quorumtool -s")
+ if rc != 0 and err:
+ raise ValueError(err)
+ # A return code of 2 from corosync-quorumtool means no problem
+ # occurred, but the node is not quorate
+ if rc in [0, 2] and out:
+ print(out)
+
+
+def query_qdevice_status():
+ """
+ Query qdevice status
+ """
+ if not utils.is_qdevice_configured():
+ raise ValueError("QDevice/QNetd not configured!")
+ cmd = "corosync-qdevice-tool -sv"
+ out = sh.cluster_shell().get_stdout_or_raise_error(cmd)
+ utils.print_cluster_nodes()
+ print(out)
+
+
+def query_qnetd_status():
+ """
+ Query qnetd status
+ """
+ import crmsh.bootstrap # workaround for circular dependencies
+ if not utils.is_qdevice_configured():
+ raise ValueError("QDevice/QNetd not configured!")
+ cluster_name = get_value('totem.cluster_name')
+ if not cluster_name:
+ raise ValueError("cluster_name not configured!")
+ qnetd_addr = get_value('quorum.device.net.host')
+ if not qnetd_addr:
+ raise ValueError("host for qnetd not configured!")
+
+ # Configure passwordless ssh to the qnetd host if a password would otherwise be needed
+ local_user, remote_user = utils.user_pair_for_ssh(qnetd_addr)
+ if utils.check_ssh_passwd_need(local_user, remote_user, qnetd_addr):
+ crmsh.bootstrap.configure_ssh_key(local_user)
+ utils.ssh_copy_id(local_user, remote_user, qnetd_addr)
+
+ cmd = "corosync-qnetd-tool -lv -c {}".format(cluster_name)
+ result = parallax.parallax_call([qnetd_addr], cmd)
+ _, qnetd_result_stdout, _ = result[0][1]
+ if qnetd_result_stdout:
+ utils.print_cluster_nodes()
+ print(utils.to_ascii(qnetd_result_stdout))
+
+
+def add_nodelist_from_cmaptool():
+ for nodeid, iplist in utils.get_nodeinfo_from_cmaptool().items():
+ try:
+ add_node_ucast(iplist, nodeid)
+ except IPAlreadyConfiguredError:
+ continue
+
+
+def is_unicast():
+ return get_value("totem.transport") == "udpu"
+
+
+_tCOMMENT = 0
+_tBEGIN = 1
+_tEND = 2
+_tVALUE = 3
+
+
+class Token(object):
+ def __init__(self, token, path, key=None, value=None):
+ self.token = token
+ self.path = '.'.join(path)
+ self.key = key
+ self.value = value
+
+ def __repr__(self):
+ if self.token == _tCOMMENT:
+ return self.key
+ elif self.token == _tBEGIN:
+ return "%s {" % (self.key)
+ elif self.token == _tEND:
+ return '}'
+ return '%s: %s' % (self.key, self.value)
+
+
+def corosync_tokenizer(stream):
+ """Parses the corosync config file into a token stream"""
+ section_re = re.compile(r'(\w+)\s*{')
+ value_re = re.compile(r'(\w+):\s*([\S ]+)')
+ path = []
+ while stream:
+ stream = stream.lstrip()
+ if not stream:
+ break
+ if stream[0] == '#':
+ end = stream.find('\n')
+ t = Token(_tCOMMENT, [], stream[:end])
+ stream = stream[end:]
+ yield t
+ continue
+ if stream[0] == '}':
+ t = Token(_tEND, [])
+ stream = stream[1:]
+ yield t
+ path = path[:-1]
+ continue
+ m = section_re.match(stream)
+ if m:
+ path.append(m.group(1))
+ t = Token(_tBEGIN, path, m.group(1))
+ stream = stream[m.end():]
+ yield t
+ continue
+ m = value_re.match(stream)
+ if m:
+ t = Token(_tVALUE, path + [m.group(1)], m.group(1), m.group(2))
+ stream = stream[m.end():]
+ yield t
+ continue
+ raise ValueError("Parse error at [..%s..]" % (stream[:16]))
+
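+# Illustrative token stream (a sketch) for a minimal snippet:
+#
+# list(corosync_tokenizer('totem {\n version: 2\n}'))
+# # -> three tokens whose reprs read: 'totem {', 'version: 2', '}'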
+
+def make_section(path, contents=None):
+ "Create a token sequence representing a section"
+ if not contents:
+ contents = []
+ sp = path.split('.')
+ name = sp[-1]
+ for t in contents:
+ if t.path and not t.path.startswith(path):
+ raise ValueError("%s (%s) not in path %s" % (t.path, t.key, path))
+ return [Token(_tBEGIN, sp, name)] + contents + [Token(_tEND, [])]
+
+
+def make_value(path, value):
+ "Create a token sequence representing a value"
+ sp = path.split('.')
+ name = sp[-1]
+ return [Token(_tVALUE, sp, name, value)]
+
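+# Illustrative construction (a sketch): build a section with one value,
+# suitable for Parser.add():
+#
+# make_section('quorum.device', make_value('quorum.device.model', 'net'))
+# # -> tokens that serialize as a "device { model: net }" block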
+
+class Parser(object):
+ def __init__(self, data):
+ self._tokens = list(corosync_tokenizer(data))
+
+ def find(self, name, start=0):
+ """Gets the index of the element with the given path"""
+ for i, t in enumerate(self._tokens[start:]):
+ if t.path == name:
+ return i + start
+ return -1
+
+ def find_bounds(self, name, start=0):
+ """find the (start, end) of the next instance of name found at start"""
+ i = self.find(name, start)
+ if i < 0:
+ return -1, -1
+ if self._tokens[i].token != _tBEGIN:
+ return i, i
+ e = i + 1
+ depth = 0
+ while e < len(self._tokens):
+ t = self._tokens[e]
+ if t.token == _tBEGIN:
+ depth += 1
+ if t.token == _tEND:
+ depth -= 1
+ if depth < 0:
+ break
+ e += 1
+ if e == len(self._tokens):
+ raise ValueError("Unclosed section")
+ return i, e
+
+ def get(self, path):
+ """Gets the value for the key (if any)"""
+ for t in self._tokens:
+ if t.token == _tVALUE and t.path == path:
+ return t.value
+ return None
+
+ def get_all(self, path):
+ """Returns all values matching path"""
+ ret = []
+ for t in self._tokens:
+ if t.token == _tVALUE and t.path == path:
+ ret.append(t.value)
+ return ret
+
+ def all_paths(self):
+ """Returns all value paths"""
+ ret = []
+ for t in self._tokens:
+ if t.token == _tVALUE:
+ ret.append(t.path)
+ return ret
+
+ def count(self, path):
+ """Returns the number of elements matching path"""
+ n = 0
+ for t in self._tokens:
+ if t.path == path:
+ n += 1
+ return n
+
+ def remove(self, path):
+ """Removes the given section or value"""
+ i, e = self.find_bounds(path)
+ if i < 0:
+ return
+ self._tokens = self._tokens[:i] + self._tokens[(e+1):]
+
+ def remove_section_where(self, path, key, value):
+ """
+ Remove section which contains key: value
+ Used to remove node definitions
+ """
+ nth = -1
+ start = 0
+ keypath = '.'.join([path, key])
+ while True:
+ nth += 1
+ i, e = self.find_bounds(path, start)
+ start = e + 1
+ if i < 0:
+ break
+ k = self.find(keypath, i)
+ if k < 0 or k > e:
+ continue
+ vt = self._tokens[k]
+ if vt.token == _tVALUE and vt.value == value:
+ self._tokens = self._tokens[:i] + self._tokens[(e+1):]
+ return nth
+ return -1
+
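+ # For example (sketch): on a config whose nodes have ring0_addr values
+ # 10.0.0.1 and 10.0.0.2,
+ #
+ #   p.remove_section_where('nodelist.node', 'ring0_addr', '10.0.0.2')
+ #
+ # drops the second node block and returns 1 (the zero-based match index),
+ # or -1 if no matching section is found.
+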
+ def add(self, path, tokens):
+ """Adds tokens to a section"""
+ logger.debug("corosync.add (%s) (%s)" % (path, tokens))
+ if not path:
+ self._tokens += tokens
+ return
+ start = self.find(path)
+ if start < 0:
+ return None
+ depth = 0
+ end = None
+ for i, t in enumerate(self._tokens[start + 1:]):
+ if t.token == _tBEGIN:
+ depth += 1
+ elif t.token == _tEND:
+ depth -= 1
+ if depth < 0:
+ end = start + i + 1
+ break
+ if end is None:
+ raise ValueError("Unterminated section at %s" % (start))
+ self._tokens = self._tokens[:end] + tokens + self._tokens[end:]
+
+ def set(self, path, value):
+ """Sets a key: value entry. sections are given
+ via dot-notation."""
+ i = self.find(path)
+ if i < 0:
+ spath = path.split('.')
+ return self.add('.'.join(spath[:-1]),
+ make_value(path, value))
+ if self._tokens[i].token != _tVALUE:
+ raise ValueError("%s is not a value" % (path))
+ self._tokens[i].value = value
+
+ def to_string(self):
+ '''
+ Serialize tokens into the corosync.conf
+ file format
+ '''
+ def joiner(tstream):
+ indent = 0
+ last = None
+ while tstream:
+ t = tstream[0]
+ if indent and t.token == _tEND:
+ indent -= 1
+ s = ''
+ if t.token == _tCOMMENT and (last and last.token != _tCOMMENT):
+ s += '\n'
+ s += ('\t'*indent) + str(t) + '\n'
+ if t.token == _tEND:
+ s += '\n'
+ yield s
+ if t.token == _tBEGIN:
+ indent += 1
+ last = t
+ tstream = tstream[1:]
+ return ''.join(joiner(self._tokens))
+
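+# Sketch (illustrative): a typical Parser round trip, assuming conf_text holds
+# the contents of corosync.conf.
+#
+#   p = Parser(conf_text)
+#   p.get('totem.cluster_name')        # e.g. 'hacluster'
+#   p.set('totem.token', '10000')      # update, or add to the totem section
+#   new_text = p.to_string()           # serialize back to corosync.conf format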
+
+def push_configuration(nodes):
+ '''
+ Push the local configuration to the list of remote nodes
+ '''
+ return utils.cluster_copy_file(conf(), nodes)
+
+
+def pull_configuration(from_node):
+ '''
+ Copy the configuration from the given node to this node
+ '''
+ local_path = conf()
+ _, fname = tmpfiles.create()
+ print("Retrieving %s:%s..." % (from_node, local_path))
+ cmd = ['scp', '-qC',
+ '-o', 'PasswordAuthentication=no',
+ '-o', 'StrictHostKeyChecking=no',
+ '%s:%s' % (from_node, local_path),
+ fname]
+ rc = utils.ext_cmd_nosudo(cmd, shell=False)
+ if rc == 0:
+ with open(fname) as f:
+ data = f.read()
+ newhash = hash(data)
+ if os.path.isfile(local_path):
+ with open(local_path) as f:
+ olddata = f.read()
+ oldhash = hash(olddata)
+ if newhash == oldhash:
+ print("No change.")
+ return
+ print("Writing %s:%s..." % (utils.this_node(), local_path))
+ with open(local_path, 'w') as local_file:
+ local_file.write(data)
+ else:
+ raise ValueError("Failed to retrieve %s from %s" % (local_path, from_node))
+
+
+def diff_configuration(nodes, checksum=False):
+ local_path = conf()
+ this_node = utils.this_node()
+ nodes = list(nodes)
+ if checksum:
+ utils.remote_checksum(local_path, nodes, this_node)
+ elif len(nodes) == 1:
+ utils.remote_diff_this(local_path, nodes, this_node)
+ elif this_node in nodes:
+ nodes.remove(this_node)
+ utils.remote_diff_this(local_path, nodes, this_node)
+ elif nodes:
+ utils.remote_diff(local_path, nodes)
+
+
+def get_free_nodeid(parser):
+ ids = parser.get_all('nodelist.node.nodeid')
+ if not ids:
+ return 1
+ ids = [int(i) for i in ids]
+ max_id = max(ids) + 1
+ for i in range(1, max_id):
+ if i not in ids:
+ return i
+ return max_id
+
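+# For example (sketch): with nodeids 1, 2 and 4 already in the nodelist,
+# get_free_nodeid() returns 3; with 1 and 2 only, it returns max_id, i.e. 3.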
+
+def get_ip(node):
+ try:
+ return socket.gethostbyname(node)
+ except socket.error:
+ return None
+
+
+def get_all_paths():
+ p = Parser(utils.read_from_file(conf()))
+ return p.all_paths()
+
+
+def get_value(path):
+ p = Parser(utils.read_from_file(conf()))
+ return p.get(path)
+
+
+def get_values(path):
+ p = Parser(utils.read_from_file(conf()))
+ return p.get_all(path)
+
+
+def set_value(path, value):
+ p = Parser(utils.read_from_file(conf()))
+ p.set(path, value)
+ utils.str2file(p.to_string(), conf())
+
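+# Sketch (illustrative): the helpers above wrap Parser for one-shot reads and
+# writes of corosync.conf, e.g.
+#
+#   get_value('totem.cluster_name')     # e.g. 'hacluster'
+#   set_value('totem.token', '10000')   # rewrites corosync.conf in place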
+
+class IPAlreadyConfiguredError(Exception):
+ pass
+
+
+def find_configured_ip(ip_list):
+ """
+ Find out whether any of the given IPs is already configured
+ If so, raise IPAlreadyConfiguredError
+ """
+ p = Parser(utils.read_from_file(conf()))
+ # get the list of IPs already configured in corosync.conf
+ corosync_iplist = []
+ for path in set(p.all_paths()):
+ if re.search('nodelist.node.ring[0-9]*_addr', path):
+ corosync_iplist.extend(p.get_all(path))
+
+ # all_possible_ip is a set of IPs used to check whether any of them is already configured
+ all_possible_ip = set(ip_list)
+ # get local ip list
+ is_ipv6 = utils.IP.is_ipv6(ip_list[0])
+ local_ip_list = utils.InterfacesInfo.get_local_ip_list(is_ipv6)
+ # extend all_possible_ip if ip_list contains a local IP,
+ # to avoid this scenario on a joining node:
+ # eth0's IP is already configured in corosync.conf
+ # eth1's IP is also about to be added to the nodelist
+ # if this scenario happens, raise IPAlreadyConfiguredError
+ if bool(set(ip_list) & set(local_ip_list)):
+ all_possible_ip |= set(local_ip_list)
+ configured_ip = list(all_possible_ip & set(corosync_iplist))
+ if configured_ip:
+ raise IPAlreadyConfiguredError("IP {} was already configured".format(','.join(configured_ip)))
+
+
+def add_node_ucast(ip_list, node_id=None):
+
+ find_configured_ip(ip_list)
+
+ p = Parser(utils.read_from_file(conf()))
+ if node_id is None:
+ node_id = get_free_nodeid(p)
+ node_value = []
+ for i, addr in enumerate(ip_list):
+ node_value += make_value('nodelist.node.ring{}_addr'.format(i), addr)
+ node_value += make_value('nodelist.node.nodeid', str(node_id))
+
+ if get_values("nodelist.node.ring0_addr") == []:
+ p.add('', make_section('nodelist', []))
+ p.add('nodelist', make_section('nodelist.node', node_value))
+
+ num_nodes = p.count('nodelist.node')
+ p.set('quorum.two_node', '1' if num_nodes == 2 else '0')
+ if p.get("quorum.device.model") == "net":
+ p.set('quorum.two_node', '0')
+
+ utils.str2file(p.to_string(), conf())
+
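+# Sketch (illustrative): adding a node with two rings; ring numbers follow the
+# order of the given address list, e.g.
+#
+#   add_node_ucast(['192.168.1.2', '10.10.10.2'])
+#
+# creates a node entry with ring0_addr 192.168.1.2 and ring1_addr 10.10.10.2,
+# picks the next free nodeid, and keeps quorum.two_node consistent.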
+
+def add_node(addr, name=None):
+ '''
+ Add node to corosync.conf
+ '''
+ coronodes = utils.list_corosync_nodes()
+ nodenames = utils.list_corosync_node_names()
+ try:
+ nodes = utils.list_cluster_nodes()
+ except Exception:
+ nodes = []
+ ipaddr = get_ip(addr)
+ if addr in nodenames + coronodes or (ipaddr and ipaddr in coronodes):
+ logger.warning("%s already in corosync.conf" % (addr))
+ return
+ if name and name in nodenames + coronodes:
+ logger.warning("%s already in corosync.conf" % (name))
+ return
+ if addr in nodes:
+ logger.warning("%s already in configuration" % (addr))
+ return
+ if name and name in nodes:
+ logger.warning("%s already in configuration" % (name))
+ return
+
+ p = Parser(utils.read_from_file(conf()))
+
+ node_addr = addr
+ node_id = get_free_nodeid(p)
+ node_name = name
+ node_value = (make_value('nodelist.node.ring0_addr', node_addr) +
+ make_value('nodelist.node.nodeid', str(node_id)))
+ if node_name:
+ node_value += make_value('nodelist.node.name', node_name)
+
+ p.add('nodelist', make_section('nodelist.node', node_value))
+
+ num_nodes = p.count('nodelist.node')
+ p.set('quorum.two_node', '1' if num_nodes == 2 else '0')
+ if p.get("quorum.device.model") == "net":
+ p.set('quorum.two_node', '0')
+
+ utils.str2file(p.to_string(), conf())
+
+ # update running config (if any)
+ if nodes:
+ utils.ext_cmd(["corosync-cmapctl",
+ "-s", "nodelist.node.%s.nodeid" % (num_nodes - 1),
+ "u32", str(node_id)], shell=False)
+ utils.ext_cmd(["corosync-cmapctl",
+ "-s", "nodelist.node.%s.ring0_addr" % (num_nodes - 1),
+ "str", node_addr], shell=False)
+ if node_name:
+ utils.ext_cmd(["corosync-cmapctl",
+ "-s", "nodelist.node.%s.name" % (num_nodes - 1),
+ "str", node_name], shell=False)
+
+
+def del_node(addr):
+ '''
+ Remove node from corosync
+ '''
+ p = Parser(utils.read_from_file(conf()))
+ nth = p.remove_section_where('nodelist.node', 'ring0_addr', addr)
+ if nth == -1:
+ return
+
+ num_nodes = p.count('nodelist.node')
+ p.set('quorum.two_node', '1' if num_nodes == 2 else '0')
+ if p.get("quorum.device.model") == "net":
+ p.set('quorum.two_node', '0')
+
+ utils.str2file(p.to_string(), conf())
+
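+# Sketch (illustrative): del_node() is the inverse of add_node(); the node is
+# matched by its ring0_addr, e.g.
+#
+#   add_node('192.168.1.3', name='node3')
+#   del_node('192.168.1.3')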
+
+_COROSYNC_CONF_TEMPLATE_HEAD = """# Please read the corosync.conf.5 manual page
+
+totem {
+ version: 2
+ cluster_name: %(clustername)s
+ clear_node_high_bit: yes
+"""
+_COROSYNC_CONF_TEMPLATE_TAIL = """
+ %(rrp_mode)s
+ %(transport)s
+ %(ipv6)s
+ %(ipv6_nodeid)s
+}
+
+logging {
+ fileline: off
+ to_stderr: no
+ to_logfile: yes
+ logfile: /var/log/cluster/corosync.log
+ to_syslog: yes
+ debug: off
+ timestamp: on
+ logger_subsys {
+ subsys: QUORUM
+ debug: off
+ }
+}
+
+%(nodelist)s
+%(quorum)s
+"""
+_COROSYNC_CONF_TEMPLATE_RING = """
+ interface {
+ ringnumber: %(number)d
+ %(bindnetaddr)s
+%(mcast)s
+ ttl: 1
+ }
+"""
+
+
+def create_configuration(clustername="hacluster",
+ bindnetaddr=None,
+ mcastaddr=None,
+ mcastport=None,
+ ringXaddr=None,
+ transport=None,
+ ipv6=False,
+ nodeid=None,
+ two_rings=False,
+ qdevice=None):
+
+ if transport == "udpu":
+ ring_tmpl = ""
+ for i in 0, 1:
+ ring_tmpl += " ring{}_addr: {}\n".format(i, ringXaddr[i])
+ if not two_rings:
+ break
+
+ nodelist_tmpl = """nodelist {
+ node {
+%(ringaddr)s
+ nodeid: 1
+ }
+}
+""" % {"ringaddr": ring_tmpl}
+ else:
+ nodelist_tmpl = ""
+
+ transport_tmpl = ""
+ if transport is not None:
+ transport_tmpl = "transport: {}\n".format(transport)
+
+ rrp_mode_tmp = ""
+ if two_rings:
+ rrp_mode_tmp = "rrp_mode: passive"
+
+ ipv6_tmpl = ""
+ ipv6_nodeid = ""
+ if ipv6:
+ ipv6_tmpl = "ip_version: ipv6"
+ if transport != "udpu":
+ ipv6_nodeid = "nodeid: %d" % nodeid
+
+ quorum_tmpl = """quorum {
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ provider: corosync_votequorum
+ expected_votes: 1
+ two_node: 0
+}
+"""
+ if qdevice is not None:
+ quorum_tmpl = """quorum {
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ provider: corosync_votequorum
+ expected_votes: 1
+ two_node: 0
+ device {
+ votes: 0
+ model: net
+ net {
+ tls: %(tls)s
+ host: %(ip)s
+ port: %(port)s
+ algorithm: %(algo)s
+ tie_breaker: %(tie_breaker)s
+ }
+ }
+}
+""" % qdevice.__dict__
+
+ config_common = {
+ "clustername": clustername,
+ "nodelist": nodelist_tmpl,
+ "quorum": quorum_tmpl,
+ "ipv6": ipv6_tmpl,
+ "ipv6_nodeid": ipv6_nodeid,
+ "rrp_mode": rrp_mode_tmp,
+ "transport": transport_tmpl
+ }
+
+ _COROSYNC_CONF_TEMPLATE_RING_ALL = ""
+ mcast_tmp = []
+ bindnetaddr_tmp = []
+ config_ring = []
+ for i in 0, 1:
+ mcast_tmp.append("")
+ if mcastaddr is not None:
+ mcast_tmp[i] += " mcastaddr: {}\n".format(mcastaddr[i])
+ if mcastport is not None:
+ mcast_tmp[i] += " mcastport: {}".format(mcastport[i])
+
+ bindnetaddr_tmp.append("")
+ if bindnetaddr is None:
+ bindnetaddr_tmp[i] = ""
+ else:
+ bindnetaddr_tmp[i] = "bindnetaddr: {}".format(bindnetaddr[i])
+
+ config_ring.append("")
+ config_ring[i] = {
+ "bindnetaddr": bindnetaddr_tmp[i],
+ "mcast": mcast_tmp[i],
+ "number": i
+ }
+ _COROSYNC_CONF_TEMPLATE_RING_ALL += _COROSYNC_CONF_TEMPLATE_RING % config_ring[i]
+
+ if not two_rings:
+ break
+
+ _COROSYNC_CONF_TEMPLATE = _COROSYNC_CONF_TEMPLATE_HEAD + \
+ _COROSYNC_CONF_TEMPLATE_RING_ALL + \
+ _COROSYNC_CONF_TEMPLATE_TAIL
+ utils.str2file(_COROSYNC_CONF_TEMPLATE % config_common, conf())
+
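+# Sketch (illustrative, hypothetical values): generating a single-ring unicast
+# configuration.
+#
+#   create_configuration(clustername='demo',
+#                        ringXaddr=['192.168.1.1'],
+#                        mcastport=[5405],
+#                        transport='udpu')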
+
+def get_corosync_value(key):
+ """
+ Get corosync configuration value from corosync-cmapctl or corosync.conf
+ """
+ try:
+ out = sh.cluster_shell().get_stdout_or_raise_error("corosync-cmapctl {}".format(key))
+ res = re.search(r'{}\s+.*=\s+(.*)'.format(key), out)
+ return res.group(1) if res else None
+ except ValueError:
+ out = get_value(key)
+ return out
+
+
+def get_corosync_value_dict():
+ """
+ Get corosync value, then return these values as dict
+ """
+ value_dict = {}
+
+ token = get_corosync_value("totem.token")
+ value_dict["token"] = int(int(token)/1000) if token else int(COROSYNC_TOKEN_DEFAULT/1000)
+
+ consensus = get_corosync_value("totem.consensus")
+ value_dict["consensus"] = int(int(consensus)/1000) if consensus else int(value_dict["token"]*1.2)
+
+ return value_dict
+
+
+def token_and_consensus_timeout():
+ """
+ Get corosync token plus consensus timeout
+ """
+ _dict = get_corosync_value_dict()
+ return _dict["token"] + _dict["consensus"]
diff --git a/crmsh/crash_test/__init__.py b/crmsh/crash_test/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crmsh/crash_test/__init__.py
diff --git a/crmsh/crash_test/check.py b/crmsh/crash_test/check.py
new file mode 100644
index 0000000..fdd2ffe
--- /dev/null
+++ b/crmsh/crash_test/check.py
@@ -0,0 +1,320 @@
+import re
+import os
+
+from crmsh import utils as crmshutils
+from crmsh import bootstrap as crmshboot
+from crmsh import completers
+
+from . import utils
+from . import task
+from . import config
+from ..service_manager import ServiceManager
+from ..sh import ShellUtils
+
+
+def fix(context):
+ """
+ Check configuration and fix the abnormal options
+ """
+ if context.check_conf:
+ candidate = check_sbd()
+ if candidate != "":
+ correct_sbd(context, candidate)
+ print()
+
+
+def check_sbd():
+ """
+ Check the sbd device and find a possible fix for incorrect disk
+
+ Only one device path in SBD_DEVICE is supported in the current version
+ """
+ print("\n============ Checking the SBD device ============")
+ task_inst = task.TaskCheck("Checking SBD device")
+
+ with task_inst.run():
+
+ if not os.path.exists(config.SBD_CONF):
+ task_inst.info("SBD configuration file {} not found.".
+ format(config.SBD_CONF))
+ return ""
+
+ sbd_options = crmshutils.parse_sysconfig(config.SBD_CONF)
+
+ if not "SBD_DEVICE" in sbd_options:
+ task_inst.info("SBD DEVICE not used.")
+ return ""
+
+ dev = sbd_options["SBD_DEVICE"]
+
+ if not os.path.exists(dev):
+ task_inst.warn("SBD device '{}' is not exist.".format(dev))
+ else:
+ if utils.is_valid_sbd(dev):
+ task_inst.info("'{}' is a valid SBD device.".format(dev))
+ return ""
+ else:
+ task_inst.warn("Device '{}' is not valid for SBD, may need initialize."
+ .format(dev))
+
+ candidate = utils.find_candidate_sbd(dev)
+
+ if candidate == "":
+ task_inst.warn("Fail to find a valid candidate SBD device.")
+ return ""
+
+ task_inst.info("Found '{}' with SBD header exist.".format(candidate))
+
+ return candidate
+
+
+def correct_sbd(context, can):
+ """
+ Fix the sbd device conf with candidate device
+
+ Only one device path in SBD_DEVICE is supported in the current version
+ """
+
+ task_inst = task.TaskFixSBD(can, context.force)
+ try:
+ task_inst.pre_check()
+ task_inst.print_header()
+ with task_inst.backup():
+ task_inst.run()
+ task_inst.verify()
+ except task.TaskError as err:
+ task_inst.error(str(err))
+ raise crmshutils.TerminateSubCommand
+
+
+def check(context):
+ """
+ Check environment and cluster state if related options are enabled
+ """
+ if context.cluster_check:
+ check_cluster()
+ print()
+
+
+def check_environment():
+ """
+ A set of functions to check environment
+ """
+ print("\n============ Checking environment ============")
+ check_my_hostname_resolves()
+ check_time_service()
+ check_firewall()
+
+
+def check_my_hostname_resolves():
+ """
+ Check whether the hostname is resolvable
+ """
+ task_inst = task.TaskCheck("Checking hostname resolvable")
+ with task_inst.run():
+ if not crmshboot.my_hostname_resolves():
+ task_inst.error('''Hostname "{}" is unresolvable.
+ Please add an entry to /etc/hosts or configure DNS.'''.format(utils.this_node()))
+
+
+def check_time_service():
+ """
+ Check time service
+ """
+ task_inst = task.TaskCheck("Checking time service")
+ with task_inst.run():
+ service_manager = ServiceManager()
+ timekeepers = ('chronyd.service', 'ntp.service', 'ntpd.service')
+ timekeeper = None
+ for tk in timekeepers:
+ if service_manager.service_is_available(tk):
+ timekeeper = tk
+ break
+ else:
+ task_inst.warn("No NTP service found.")
+ return
+
+ task_inst.info("{} is available".format(timekeeper))
+ if service_manager.service_is_enabled(timekeeper):
+ task_inst.info("{} is enabled".format(timekeeper))
+ else:
+ task_inst.warn("{} is disabled".format(timekeeper))
+ if service_manager.service_is_active(timekeeper):
+ task_inst.info("{} is active".format(timekeeper))
+ else:
+ task_inst.warn("{} is not active".format(timekeeper))
+
+
+def check_port_open(task, firewall_type):
+ """
+ Check whether corosync port is blocked by iptables
+ """
+ ports = utils.corosync_port_list()
+ if not ports:
+ task.error("Can not get corosync's port")
+ return
+
+ if firewall_type == "firewalld":
+ rc, out, err = ShellUtils().get_stdout_stderr('firewall-cmd --list-port')
+ if rc != 0:
+ task.error(err)
+ return
+ for p in ports:
+ if re.search(' {}/udp'.format(p), out):
+ task.info("UDP port {} is opened in firewalld".format(p))
+ else:
+ task.error("UDP port {} should open in firewalld".format(p))
+ elif firewall_type == "SuSEfirewall2":
+ # TODO
+ pass
+
+
+def check_firewall():
+ """
+ Check the firewall status
+ """
+ task_inst = task.TaskCheck("Checking firewall")
+ with task_inst.run():
+ for item in ("firewalld", "SuSEfirewall2"):
+ if crmshutils.package_is_installed(item):
+ task_inst.info("{}.service is available".format(item))
+ if ServiceManager().service_is_active(item):
+ task_inst.info("{}.service is active".format(item))
+ check_port_open(task_inst, item)
+ else:
+ task_inst.warn("{}.service is not active".format(item))
+ break
+ else:
+ task_inst.warn("Failed to detect firewall")
+
+
+def check_cluster():
+ """
+ A set of functions to check cluster state
+ """
+ print("\n============ Checking cluster state ============")
+ if not check_cluster_service():
+ return
+ check_fencing()
+ check_nodes()
+ check_resources()
+
+
+def check_cluster_service(quiet=False):
+ """
+ Check service status of pacemaker/corosync
+ """
+ task_inst = task.TaskCheck("Checking cluster service", quiet=quiet)
+ with task_inst.run():
+ service_manager = ServiceManager()
+ if service_manager.service_is_enabled("pacemaker"):
+ task_inst.info("pacemaker.service is enabled")
+ else:
+ task_inst.warn("pacemaker.service is disabled")
+
+ if service_manager.service_is_enabled("corosync"):
+ task_inst.warn("corosync.service is enabled")
+
+ for s in ("corosync", "pacemaker"):
+ if service_manager.service_is_active(s):
+ task_inst.info("{}.service is running".format(s))
+ else:
+ task_inst.error("{}.service is not running!".format(s))
+ return task_inst.passed
+
+
+def check_fencing():
+ """
+ Check STONITH/Fence:
+ Whether stonith is enabled
+ Whether stonith resource is configured and running
+ """
+ task_inst = task.TaskCheck("Checking STONITH/Fence")
+ with task_inst.run():
+ if not utils.FenceInfo().fence_enabled:
+ task_inst.warn("stonith is disabled")
+ return
+
+ task_inst.info("stonith is enabled")
+ rc, outp, _ = ShellUtils().get_stdout_stderr("crm_mon -r1 | grep '(stonith:.*):'")
+ if rc != 0:
+ task_inst.warn("No stonith resource configured!")
+ return
+
+ res = re.search(r'([^\s]+)\s+\(stonith:(.*)\):\s+(\w+)', outp)
+ res_name, res_agent, res_state = res.groups()
+ common_msg = "stonith resource {}({})".format(res_name, res_agent)
+ state_msg = "{} is {}".format(common_msg, res_state)
+
+ task_inst.info("{} is configured".format(common_msg))
+ if res_state == "Started":
+ task_inst.info(state_msg)
+ else:
+ task_inst.warn(state_msg)
+
+ if re.search(r'sbd$', res_agent):
+ if ServiceManager().service_is_active("sbd"):
+ task_inst.info("sbd service is running")
+ else:
+ task_inst.warn("sbd service is not running!")
+
+
+def check_nodes():
+ """
+ Check nodes info:
+ Current DC
+ Quorum status
+ Online/OFFLINE/UNCLEAN nodes
+ """
+ task_inst = task.TaskCheck("Checking nodes")
+ with task_inst.run():
+ rc, outp, errp = ShellUtils().get_stdout_stderr("crm_mon -1")
+ if rc != 0:
+ task_inst.error("run \"crm_mon -1\" error: {}".format(errp))
+ return
+ # check DC
+ res = re.search(r'Current DC: (.*) \(', outp)
+ if res:
+ task_inst.info("DC node: {}".format(res.group(1)))
+
+ # check quorum
+ if re.search(r'partition with quorum', outp):
+ task_inst.info("Cluster have quorum")
+ else:
+ task_inst.warn("Cluster lost quorum!")
+
+ # check Online nodes
+ res = re.search(r'Online:\s+(\[.*\])', outp)
+ if res:
+ task_inst.info("Online nodes: {}".format(res.group(1)))
+
+ # check OFFLINE nodes
+ res = re.search(r'OFFLINE:\s+(\[.*\])', outp)
+ if res:
+ task_inst.warn("OFFLINE nodes: {}".format(res.group(1)))
+
+ # check UNCLEAN nodes
+ res = re.findall(r'Node (.*): UNCLEAN', outp)
+ for item in res:
+ task_inst.warn('Node {} is UNCLEAN!'.format(item))
+
+
+def check_resources():
+ """
+ Check items of Started/Stopped/FAILED resources
+ """
+ task_inst = task.TaskCheck("Checking resources")
+ with task_inst.run():
+ started_list = completers.resources_started()
+ stopped_list = completers.resources_stopped()
+ # TODO need suitable method to get failed resources list
+ failed_list = []
+ if started_list:
+ task_inst.info("Started resources: {}".format(','.join(started_list)))
+ if stopped_list:
+ task_inst.info("Stopped resources: {}".format(','.join(stopped_list)))
+ if failed_list:
+ task_inst.warn("Failed resources: {}".format(','.join(failed_list)))
+
+ if not (started_list or stopped_list or failed_list):
+ task_inst.info("No resources configured")
diff --git a/crmsh/crash_test/config.py b/crmsh/crash_test/config.py
new file mode 100644
index 0000000..64b4e72
--- /dev/null
+++ b/crmsh/crash_test/config.py
@@ -0,0 +1,9 @@
+FENCE_TIMEOUT = 60
+FENCE_NODE = "crm_attribute -t status -N '{}' -n terminate -v true"
+BLOCK_IP = '''iptables -{action} INPUT -s {peer_ip} -j DROP;
+ iptables -{action} OUTPUT -d {peer_ip} -j DROP'''
+REMOVE_PORT = "firewall-cmd --zone=public --remove-port={port}/udp"
+ADD_PORT = "firewall-cmd --zone=public --add-port={port}/udp"
+FENCE_HISTORY = "stonith_admin -h {node}"
+SBD_CONF = "/etc/sysconfig/sbd"
+SBD_CHECK_CMD = "sbd -d {dev} dump"
diff --git a/crmsh/crash_test/explain.py b/crmsh/crash_test/explain.py
new file mode 100644
index 0000000..08b6ab8
--- /dev/null
+++ b/crmsh/crash_test/explain.py
@@ -0,0 +1,31 @@
+contents = {}
+
+contents["sbd"]= '''On {nodeA}, once the sbd process get killed, there are two situations:
+ a) sbd process restarted
+ Systemd will restart sbd service immediately.
+ Restarting sbd service will also lead to restart corosync and pacemaker services because of the pre-defined dependencies among the systemd unit files.
+
+ b) {nodeA} experience the watchdog fencing
+ There is the race condition with the watchdog timer. Watchdog might reset {nodeA}, just before the sbd service get restarted and not tickle the watchdog timer in time.'''
+
+contents["sbd-l"] = '''On {nodeA}, the sbd service is killed consistantly all the time.
+Very quickly, systemd will hit the start limit to restart sbd service.
+Basically, in the end, systemd stops restarting anymore, marks the sbd service as failure.
+{nodeB} sbd cluster health check marks it as "UNHEALTHY".
+{nodeB} treats {nodeA} as a node lost, and fences it in the end.'''
+
+contents["corosync"] = '''On {nodeA}, once the corosync process get killed, systemd will restart corosync service immediately. There are two situations:
+ a) corosync process restarts
+ {nodeA} corosync process get restarted and rejoins to the existent membership quickly enough.
+ Basically, it happens before {nodeB} treats it as a node lost.
+ In the end, the cluster looks like nothing happened to the user. RA stays safe and sound.
+
+ b) {nodeA} gets fenced
+ {nodeA} gets fenced since {nodeB} corosync just ran out of timeout and treat it as a node lost and forms a new membership.
+ The decision making process of {nodeB}, pengine(aka. schedulerd in Pacemaker 2), will initiate fence action against {nodeA}. '''
+
+contents["corosync-l"] = '''The corosync service is killed consistantly all the time.
+Very quickly, systemd will hit the start limit to restart corosync service.
+Basically, in the end, systemd stops restarting anymore, marks the corosync service as failure. {nodeB} treats {nodeA} as a node lost, marks it as "unclean", and fence it in the end.'''
+
+contents["pacemakerd"] = '''The pacemakerd process gets restarted by systemd. All RAs must stay intact.'''
diff --git a/crmsh/crash_test/main.py b/crmsh/crash_test/main.py
new file mode 100644
index 0000000..638a393
--- /dev/null
+++ b/crmsh/crash_test/main.py
@@ -0,0 +1,209 @@
+import os
+import sys
+import argparse
+from argparse import RawDescriptionHelpFormatter
+
+from . import check
+from . import utils
+from . import task
+from crmsh import utils as crmshutils
+from crmsh import log
+
+
+logger = log.setup_logger(__name__)
+
+
+class Context(object):
+ """
+ Class to store context attributes
+ """
+ def __init__(self):
+ """
+ Initialize attributes
+ """
+ self.process_name = None
+ self.var_dir = None
+ self.task_list = []
+ self.report_path = None
+ self.jsonfile = None
+ self.logfile = None
+ self.current_case = None
+
+ # set by argparse(functions)
+ self.check_conf = None
+ self.cluster_check = None
+ self.sbd = None
+ self.corosync = None
+ self.pacemakerd = None
+ self.fence_node = None
+ self.sp_iptables = None
+ self.loop = None
+
+ # set by argument(additional options)
+ self.force = None
+ self.help = None
+
+
+ctx = Context()
+
+
+def kill_process(context):
+ """
+ Testcase: kill cluster related processes
+ --kill-sbd restarted or fenced
+ --kill-sbd -l fenced
+ --kill-corosync restarted or fenced
+ --kill-corosync -l fenced
+ --kill-pacemakerd restarted
+ --kill-pacemakerd -l blocked by bsc#1111692
+ """
+ for case in ('sbd', 'corosync', 'pacemakerd'):
+ if getattr(context, case):
+ if case == 'pacemakerd' and context.loop:
+ return  # blocked by bsc#1111692
+ context.current_case = case
+ break
+ else:
+ return
+
+ task_inst = task.TaskKill(context)
+ try:
+ task_inst.pre_check()
+ task_inst.print_header()
+ task_inst.enable_report()
+ task_inst.run()
+ task_inst.wait()
+ except task.TaskError as err:
+ task_inst.error(str(err))
+ raise crmshutils.TerminateSubCommand
+
+
+def split_brain(context):
+ """
+ Testcase: make split brain by blocking corosync ports
+ """
+ if not context.sp_iptables:
+ return
+
+ task_inst = task.TaskSplitBrain(context.force)
+ try:
+ task_inst.pre_check()
+ task_inst.print_header()
+ with task_inst.do_block():
+ task_inst.run()
+ task_inst.wait()
+ except task.TaskError as err:
+ task_inst.error(str(err))
+ raise crmshutils.TerminateSubCommand
+
+
+def fence_node(context):
+ """
+ Testcase: fence specific node
+ """
+ if not context.fence_node:
+ return
+
+ task_inst = task.TaskFence(context)
+ try:
+ task_inst.pre_check()
+ task_inst.print_header()
+ task_inst.run()
+ task_inst.wait()
+ except task.TaskError as err:
+ task_inst.error(str(err))
+ raise crmshutils.TerminateSubCommand
+
+
+class MyArgParseFormatter(RawDescriptionHelpFormatter):
+ def __init__(self, prog):
+ super(MyArgParseFormatter, self).__init__(prog, max_help_position=50)
+
+
+def parse_argument(context):
+ """
+ Parse argument using argparse
+ """
+ parser = argparse.ArgumentParser(prog=context.process_name,
+ description="""
+Cluster crash test tool set.
+It standardizes the steps to simulate cluster failures before you move your cluster
+into production. It is carefully designed with the proper steps and does not change
+any configuration or harm the cluster without confirmation from the user.""",
+ add_help=False,
+ formatter_class=MyArgParseFormatter,
+ epilog='''
+Log: {}
+Json results: {}
+For each --kill-* testcase, report directory: {}'''.format(context.logfile,
+ context.jsonfile,
+ context.report_path))
+
+ #parser.add_argument('-c', '--check-conf', dest='check_conf', action='store_true',
+ # help='Validate the configurations')
+
+ group_mutual = parser.add_mutually_exclusive_group()
+ group_mutual.add_argument('--kill-sbd', dest='sbd', action='store_true',
+ help='Kill sbd daemon')
+ group_mutual.add_argument('--kill-corosync', dest='corosync', action='store_true',
+ help='Kill corosync daemon')
+ group_mutual.add_argument('--kill-pacemakerd', dest='pacemakerd', action='store_true',
+ help='Kill pacemakerd daemon')
+ group_mutual.add_argument('--fence-node', dest='fence_node', metavar='NODE',
+ help='Fence specific node')
+ group_mutual.add_argument('--split-brain-iptables', dest='sp_iptables', action='store_true',
+ help='Make split brain by blocking traffic between cluster nodes')
+ parser.add_argument('-l', '--kill-loop', dest='loop', action='store_true',
+ help='Kill process in loop')
+
+ other_options = parser.add_argument_group('other options')
+ other_options.add_argument('-f', '--force', dest='force', action='store_true',
+ help='Force to skip all prompts (use with caution; the intended fault will be injected to verify cluster resilience)')
+ other_options.add_argument('-h', '--help', dest='help', action='store_true',
+ help='Show this help message and exit')
+
+ args = parser.parse_args()
+ if args.help or len(sys.argv) == 1:
+ parser.print_help()
+ raise crmshutils.TerminateSubCommand
+
+ for arg in vars(args):
+ setattr(context, arg, getattr(args, arg))
+
+
+def setup_basic_context(context):
+ """
+ Setup basic context
+ """
+ var_dir = "/var/lib/crmsh/{}".format(context.process_name)
+ context.var_dir = var_dir
+ context.report_path = var_dir
+ context.jsonfile = "{}/{}.json".format(var_dir, context.process_name)
+ context.logfile = log.CRMSH_LOG_FILE
+
+
+def run(context):
+ """
+ Major work flow
+ """
+ setup_basic_context(context)
+ parse_argument(context)
+ if not utils.is_root():
+ logger.fatal("{} can only be executed as user root!".format(context.process_name))
+ raise crmshutils.TerminateSubCommand
+ if not os.path.exists(context.var_dir):
+ os.makedirs(context.var_dir, exist_ok=True)
+
+ try:
+ check.fix(context)
+ check.check(context)
+ kill_process(context)
+ fence_node(context)
+ split_brain(context)
+
+ except KeyboardInterrupt:
+ utils.json_dumps()
+ raise
diff --git a/crmsh/crash_test/task.py b/crmsh/crash_test/task.py
new file mode 100644
index 0000000..62c89c6
--- /dev/null
+++ b/crmsh/crash_test/task.py
@@ -0,0 +1,637 @@
+import os
+import re
+import time
+import threading
+import shutil
+import tempfile
+from contextlib import contextmanager
+from crmsh import utils as crmshutils
+from crmsh import log
+from . import utils
+from . import config
+from ..service_manager import ServiceManager
+from ..sh import ShellUtils
+
+logger = log.setup_logger(__name__)
+
+
+class TaskError(Exception):
+ pass
+
+
+class Task(object):
+ """
+ Task is a base class
+ Use for record the information of each test case
+ """
+ REBOOT_WARNING = """!!! WARNING WARNING WARNING !!!
+THIS CASE MAY LEAD TO THE NODE BEING FENCED.
+TYPE Yes TO CONTINUE, OTHER INPUTS WILL CANCEL THIS CASE [Yes/No](No): """
+
+ def __init__(self, description, flush=False, quiet=False):
+ """
+ Init function
+ flush, to print the message immediately
+ """
+ self.passed = True
+ self.force = False
+ self.quiet = quiet
+ self.messages = []
+ self.timestamp = utils.now()
+ self.description = description
+ utils.msg_info(self.description, to_stdout=False)
+ self.flush = flush
+ self.fence_start_event = threading.Event()
+ self.fence_finish_event = threading.Event()
+ self.thread_stop_event = threading.Event()
+ from . import main
+ self.prev_task_list = main.ctx.task_list
+
+ def info(self, msg):
+ self.msg_append("info", msg)
+ utils.msg_info(msg, to_stdout=self.flush)
+
+ def warn(self, msg):
+ self.msg_append("warn", msg)
+ utils.msg_warn(msg, to_stdout=self.flush)
+
+ def error(self, msg):
+ self.msg_append("error", msg)
+ utils.msg_error(msg, to_stdout=self.flush)
+
+ def msg_append(self, msg_type, msg):
+ if msg_type == "error":
+ self.passed = False
+ self.messages.append((msg_type, msg, utils.now()))
+ if self.flush:
+ self.to_json()
+ self.to_report()
+
+ def header(self):
+ pass
+
+ def to_report(self):
+ pass
+
+ def to_json(self):
+ pass
+
+ def build_base_result(self):
+ """
+ Build base results
+ """
+ self.result = {
+ "Timestamp": self.timestamp,
+ "Description": self.description,
+ "Messages": ["{} {}:{}".format(m[2], m[0].upper(), m[1])
+ for m in self.messages]
+ }
+
+ def print_header(self):
+ """
+ Print testcase header
+ """
+ print(self.header())
+ if not self.force and not utils.warning_ask(self.REBOOT_WARNING):
+ self.info("Testcase cancelled")
+ raise crmshutils.TerminateSubCommand
+
+ def task_pre_check(self, need_fence=True):
+ """
+ Prerequisite check
+ * pacemaker.service is active
+ * stonith is enabled
+ """
+ if not ServiceManager().service_is_active("pacemaker.service"):
+ raise TaskError("Cluster not running!")
+ if need_fence:
+ self.get_fence_info()
+ if not self.fence_enabled:
+ raise TaskError("Require stonith enabled")
+
+ def get_fence_info(self):
+ """
+ Get fence info
+ """
+ fence_info_inst = utils.FenceInfo()
+ self.fence_enabled = fence_info_inst.fence_enabled
+ self.fence_action = fence_info_inst.fence_action
+ self.fence_timeout = fence_info_inst.fence_timeout
+
+ def fence_action_monitor(self):
+ """
+ Monitor fencing process, running in thread, exit on two cases:
+ 1. There is one latest fence action successfully done
+ 2. No fence action during fence timeout, thread_stop_event triggered by main thread
+ """
+ target_node = None
+ from_node = None
+ fence_timestamp = None
+
+ # Try to find out which node fired the fence action
+ while not self.thread_stop_event.is_set():
+ rc, out, _ = ShellUtils().get_stdout_stderr("crm_mon -1|grep -A1 \"Fencing Actions:\"")
+ if rc == 0 and out:
+ match = re.search(r"of (.*) pending: .*origin=(.*)$", out)
+ if match:
+ target_node, from_node = match.groups()
+ self.info("Node \"{}\" will be fenced by \"{}\"!".format(target_node, from_node))
+ self.fence_start_event.set()
+ break
+ time.sleep(1)
+
+ # Try to find out proof that fence happened
+ while not self.thread_stop_event.is_set():
+ rc, out, _ = ShellUtils().get_stdout_stderr(config.FENCE_HISTORY.format(node=target_node))
+ if rc == 0 and out:
+ match = re.search(r"Node {} last fenced at: (.*)".format(target_node), out)
+ if match:
+ fence_timestamp = match.group(1)
+ task_timestamp_dt = utils.str_to_datetime(self.timestamp, '%Y/%m/%d %H:%M:%S')
+ fence_timestamp_dt = utils.str_to_datetime(fence_timestamp, '%a %b %d %H:%M:%S %Y')
+ # If the fence action timestamp is later than this task's timestamp,
+ # that is the proof
+ if task_timestamp_dt < fence_timestamp_dt:
+ self.info("Node \"{}\" was successfully fenced by \"{}\"".format(target_node, from_node))
+ # Tell main thread fence happened
+ self.fence_finish_event.set()
+ break
+ time.sleep(1)
+
+
+class TaskFence(Task):
+ """
+ Class to fence node
+ """
+ def __init__(self, context):
+ """
+ Init function
+ """
+ self.target_node = context.fence_node
+ description = "Fence node {}".format(self.target_node)
+ super(self.__class__, self).__init__(description, flush=True)
+ self.force = context.force
+
+ def header(self):
+ """
+ Header content for this task
+ """
+ h = '''==============================================
+Testcase: {}
+Fence action: {}
+Fence timeout: {}
+'''.format(self.description, self.fence_action, self.fence_timeout)
+ return h
+
+ def to_json(self):
+ """
+ Dump json result
+ """
+ self.build_base_result()
+ self.result['Fence action'] = self.fence_action
+ self.result['Fence timeout'] = self.fence_timeout
+ from . import main
+ main.ctx.task_list = self.prev_task_list + [self.result]
+ utils.json_dumps()
+
+ def pre_check(self):
+ """
+ Check the prerequisite for fence node
+ """
+ self.task_pre_check()
+
+ for cmd in ['crm_node', 'stonith_admin', 'crm_attribute']:
+ rc, _, err = ShellUtils().get_stdout_stderr("which {}".format(cmd))
+ if rc != 0 and err:
+ raise TaskError(err)
+
+ if not utils.check_node_status(self.target_node, 'member'):
+ raise TaskError("Node \"{}\" not in cluster!".format(self.target_node))
+
+ def run(self):
+ """
+ Fence node and start a thread to monitor the result
+ """
+ self.info("Trying to fence node \"{}\"".format(self.target_node))
+ ShellUtils().get_stdout_stderr(config.FENCE_NODE.format(self.target_node))
+ th = threading.Thread(target=self.fence_action_monitor)
+ th.start()
+
+ def wait(self):
+ """
+ Wait until fence happened
+ """
+ if self.target_node == utils.this_node():
+ self.info("Waiting {}s for self {}...".format(self.fence_timeout, self.fence_action))
+ else:
+ self.info("Waiting {}s for node \"{}\" {}...".format(self.fence_timeout,
+ self.target_node, self.fence_action))
+
+ result = self.fence_finish_event.wait(int(self.fence_timeout))
+ if not result:
+ self.thread_stop_event.set()
+ raise TaskError("Target fence node \"{}\" still alive".format(self.target_node))
+
+
+class TaskCheck(Task):
+ """
+ Class to define the format of output for checking item results and how to dump json
+ """
+
+ def __init__(self, description, quiet=False):
+ """
+ Init function
+ """
+ super(self.__class__, self).__init__(description, quiet=quiet)
+
+ def to_stdout(self):
+ """
+ Define the format of results to stdout
+ """
+ with utils.manage_handler("file", keep=False):
+ utils.get_handler(logger, "stream").setFormatter(utils.MyLoggingFormatter(flush=False))
+
+ if self.passed:
+ message = "{} [{}]".format(self.description, utils.CGREEN + "Pass" + utils.CEND)
+ else:
+ message = "{} [{}]".format(self.description, utils.CRED + "Fail" + utils.CEND)
+ logger.info(message, extra={'timestamp': '[{}]'.format(self.timestamp)})
+
+ for msg in self.messages:
+ logger.log(utils.LEVEL[msg[0]], msg[1], extra={'timestamp': ' '})
+
+ utils.get_handler(logger, "stream").setFormatter(utils.MyLoggingFormatter())
+
+ def to_json(self):
+ """
+ Json results
+ """
+ self.build_base_result()
+ self.result['Result'] = self.passed
+ from . import main
+ main.ctx.task_list.append(self.result)
+ utils.json_dumps()
+
+ def print_result(self):
+ """
+ Print results to stdout and json
+ """
+ if self.quiet:
+ return
+ self.to_stdout()
+ self.to_json()
+
+ @contextmanager
+ def run(self):
+ """
+ Context manager to do things and print results finally
+ """
+ try:
+ yield
+ finally:
+ self.print_result()
+
+
+class TaskKill(Task):
+ """
+ Class to define how to run kill testcases
+ """
+
+ EXPECTED = {
+ # process_name: (expected_results, expected_results_with_loop)
+ 'sbd': ('''a) sbd process restarted
+ b) Or, this node fenced.''', 'This node fenced'),
+ 'corosync': ('''a) corosync process restarted
+ b) Or, this node fenced.''', 'This node fenced'),
+ 'pacemakerd': ('pacemakerd process restarted', None),
+ }
+ WAIT_TIMEOUT = 10
+
+ def __init__(self, context):
+ """
+ Init function
+ """
+ self.target_kill = context.current_case
+ self.description = "Force kill {}".format(self.target_kill)
+ super(self.__class__, self).__init__(self.description, flush=True)
+ self.cmd = "killall -9 {}".format(self.target_kill)
+ self.looping = context.loop
+ self.force = context.force
+ if not self.looping:
+ self.expected = self.EXPECTED[self.target_kill][0]
+ else:
+ self.expected = self.EXPECTED[self.target_kill][1]
+ self.report = False
+ self.restart_happen_event = threading.Event()
+
+ def enable_report(self):
+ """
+ Enable report
+ """
+ self.report = True
+ from . import main
+ if not os.path.isdir(main.ctx.report_path):
+ raise TaskError("{} is not a directory".format(main.ctx.report_path))
+
+ report_path = main.ctx.report_path
+ report_name = "{}-{}.report".format(main.ctx.process_name, utils.now("%Y%m%d-%s"))
+ self.report_file = os.path.join(report_path, report_name)
+ print("(Report: {})".format(self.report_file))
+
+ if self.looping:
+ content_key = "{}-l".format(self.target_kill)
+ else:
+ content_key = self.target_kill
+
+ from . import explain
+ self.explain = explain.contents[content_key].format(nodeA=utils.this_node(), nodeB="other node")
+
+ def header(self):
+ """
+ Define descriptions
+ """
+ h = '''==============================================
+Testcase: {}
+Looping Kill: {}
+Expected State: {}
+'''.format(self.description, self.looping, self.expected)
+ return h
+
+ def to_json(self):
+ """
+ Json results
+ """
+ self.build_base_result()
+ self.result['Looping Kill'] = self.looping
+ self.result['Expected State'] = self.expected
+ from . import main
+ main.ctx.task_list = self.prev_task_list + [self.result]
+ utils.json_dumps()
+
+ def to_report(self):
+ """
+ Generate report
+ """
+ if not self.report:
+ return
+ with open(self.report_file, 'w') as f:
+ f.write(self.header())
+ f.write("\nLog:\n")
+ for m in self.messages:
+ f.write("{} {}:{}\n".format(m[2], m[0].upper(), m[1]))
+ f.write("\nTestcase Explained:\n")
+ f.write("{}\n".format(self.explain))
+ f.flush()
+ os.fsync(f)
+
+ def pre_check(self):
+ """
+ Check the prerequisite
+ """
+ self.task_pre_check()
+ rc, pid = utils.get_process_status(self.target_kill)
+ if not rc:
+ raise TaskError("Process {} is not running!".format(self.target_kill))
+
+ def run(self):
+ """
+ Execute specific kill command and monitor the results
+ """
+ while True:
+ rc, pid = utils.get_process_status(self.target_kill)
+ if rc:
+ self.info("Process {}({}) is running...".format(self.target_kill, pid))
+ else:
+ continue
+ self.info("Trying to run \"{}\"".format(self.cmd))
+ ShellUtils().get_stdout_stderr(self.cmd)
+ # endless loop will lead to fence
+ if not self.looping:
+ break
+
+ fence_check_th = threading.Thread(target=self.fence_action_monitor)
+ fence_check_th.start()
+ restart_check_th = threading.Thread(target=self.process_monitor)
+ restart_check_th.start()
+
+ def wait(self):
+ """
+ Wait process to restart
+ """
+ if self.fence_start_event.wait(self.WAIT_TIMEOUT) and not self.restart_happen_event.is_set():
+ raise TaskError("Process {} is not restarted!".format(self.target_kill))
+ self.thread_stop_event.set()
+
+ def process_monitor(self):
+ """
+ Monitor process status
+ """
+ while not self.thread_stop_event.is_set():
+ rc, pid = utils.get_process_status(self.target_kill)
+ if rc:
+ self.info("Process {}({}) is restarted!".format(self.target_kill, pid))
+ self.restart_happen_event.set()
+ break
+ time.sleep(1)
+
+
+class TaskSplitBrain(Task):
+ """
+ Class to define how to simulate split brain by blocking traffic between cluster nodes
+ """
+
+ def __init__(self, force=False):
+ """
+ Init function
+ """
+ self.description = "Simulate split brain by blocking traffic between cluster nodes"
+ self.expected = "One of nodes get fenced"
+ self.ports = []
+ self.peer_nodelist = []
+ super(self.__class__, self).__init__(self.description, flush=True)
+ self.force = force
+
+ def header(self):
+ """
+ Define descriptions
+ """
+ h = '''==============================================
+Testcase: {}
+Expected Result: {}
+Fence action: {}
+Fence timeout: {}
+'''.format(self.description, self.expected, self.fence_action, self.fence_timeout)
+ return h
+
+ def to_json(self):
+ """
+ Json results
+ """
+ self.build_base_result()
+ self.result['Fence action'] = self.fence_action
+ self.result['Fence timeout'] = self.fence_timeout
+ from . import main
+ main.ctx.task_list = self.prev_task_list + [self.result]
+ utils.json_dumps()
+
+ def pre_check(self):
+ """
+ Check the prerequisite
+ """
+ self.task_pre_check()
+
+ for cmd in ["iptables"]:
+ rc, _, err = ShellUtils().get_stdout_stderr("which {}".format(cmd))
+ if rc != 0 and err:
+ raise TaskError(err)
+
+ if len(utils.online_nodes()) < 2:
+ raise TaskError("At least two nodes online!")
+
+ @contextmanager
+ def do_block(self):
+ """
+ Context manager to block and unblock ip/ports
+ """
+ self.do_block_iptables()
+ try:
+ yield
+ finally:
+ self.un_block()
+
+ def do_block_iptables(self):
+ """
+ Block corosync communication ip
+ """
+ self.peer_nodelist = utils.peer_node_list()
+ for node in self.peer_nodelist:
+ self.info("Trying to temporarily block {} communication ip".format(node))
+ for ip in crmshutils.get_iplist_from_name(node):
+ ShellUtils().get_stdout_stderr(config.BLOCK_IP.format(action='I', peer_ip=ip))
+
+ def un_block(self):
+ """
+ Unblock corosync ip/ports
+ """
+ self.un_block_iptables()
+
+ def un_block_iptables(self):
+ """
+ Unblock corosync communication ip
+ """
+ for node in self.peer_nodelist:
+ self.info("Trying to recover {} communication ip".format(node))
+ for ip in crmshutils.get_iplist_from_name(node):
+ ShellUtils().get_stdout_stderr(config.BLOCK_IP.format(action='D', peer_ip=ip))
+
+ def run(self):
+ """
+ Fence node and start a thread to monitor the result
+ """
+ #self.info("Trying to fence node \"{}\"".format(self.target_node))
+ #ShellUtils().get_stdout_stderr(config.FENCE_NODE.format(self.target_node), wait=False)
+ th = threading.Thread(target=self.fence_action_monitor)
+ th.start()
+
+ def wait(self):
+ """
+ Wait until fence happened
+ """
+ result = self.fence_finish_event.wait(int(self.fence_timeout))
+ if not result:
+ self.thread_stop_event.set()
+ # should be an error here
+
+
+class TaskFixSBD(Task):
+ """
+ Class to fix SBD DEVICE incorrect issue
+ """
+
+ def __init__(self, candidate, force=False):
+ self.new = candidate
+ self.description = "Replace SBD_DEVICE with candidate {}".format(self.new)
+ self.conf = config.SBD_CONF
+ super(self.__class__, self).__init__(self.description, flush=True)
+ self.bak = tempfile.mkstemp()[1]
+ self.edit = tempfile.mkstemp()[1]
+ self.force = force
+
+ sbd_options = crmshutils.parse_sysconfig(self.conf)
+ self.old = sbd_options["SBD_DEVICE"]
+
+ def header(self):
+ """
+ Case header
+ """
+ h = '''==============================================
+Case: {}
+Original SBD device: {}
+New SBD device: {}
+'''.format(self.description, self.old, self.new)
+ return h
+
+ def to_json(self):
+ """
+ Generate json output
+ """
+ self.build_base_result()
+ self.result['Original SBD device'] = self.old
+ self.result['New SBD device'] = self.new
+ from . import main
+ main.ctx.task_list = self.prev_task_list + [self.result]
+ utils.json_dumps()
+
+ def pre_check(self):
+ """
+ Check the prerequisite
+ """
+ if not os.path.exists(self.conf):
+ raise TaskError("Configuration file {} does not exist!".format(self.conf))
+
+ if not os.path.exists(self.new):
+ raise TaskError("Device {} does not exist!".format(self.new))
+
+ @contextmanager
+ def backup(self):
+ """
+ Backup the configuration file before modifying it
+ """
+ shutil.copyfile(self.conf, self.bak)
+ try:
+ yield
+ finally:
+ if self.bak:
+ shutil.copyfile(self.bak, self.conf)
+
+ def run(self):
+ """
+ Change the SBD DEVICE of configuration file
+ """
+ with open(self.edit, "w") as editfd:
+ with open(self.conf, "r") as oldfd:
+ for line in oldfd.readlines():
+ if line.strip().startswith("SBD_DEVICE"):
+ line = "SBD_DEVICE='" + self.new +"'\n"
+ editfd.write(line)
+
+ try:
+ shutil.copymode(self.conf, self.edit)
+ os.remove(self.conf)
+ shutil.move(self.edit, self.conf)
+ os.remove(self.bak)
+ self.bak = None
+ except OSError:
+ raise TaskError("Failed to modify file {}".format(self.conf))
+
+ def verify(self):
+ """
+ Verify the modification is working
+ """
+ sbd_options = crmshutils.parse_sysconfig(self.conf)
+
+ if sbd_options["SBD_DEVICE"] == self.new:
+ self.info("SBD DEVICE change succeed")
+ else:
+ raise TaskError("Fail to replace SBD device {} in {}!".
+ format(self.new, config.SBD_CONF))
diff --git a/crmsh/crash_test/utils.py b/crmsh/crash_test/utils.py
new file mode 100644
index 0000000..556d1dc
--- /dev/null
+++ b/crmsh/crash_test/utils.py
@@ -0,0 +1,304 @@
+import os
+import re
+import glob
+import json
+import logging
+from datetime import datetime
+from contextlib import contextmanager
+from crmsh import utils as crmshutils
+from . import config
+from crmsh import log
+from crmsh.sh import ShellUtils
+
+
+logger = log.setup_logger(__name__)
+
+
+CRED = '\033[31m'
+CYELLOW = '\033[33m'
+CGREEN = '\033[32m'
+CEND = '\033[0m'
+
+LEVEL = {
+ "info": logging.INFO,
+ "warn": logging.WARNING,
+ "error": logging.ERROR
+}
+
+
+class MyLoggingFormatter(logging.Formatter):
+ """
+ Class to change logging formatter
+ """
+
+ FORMAT_FLUSH = "[%(asctime)s]%(levelname)s: %(message)s"
+ FORMAT_NOFLUSH = "%(timestamp)s%(levelname)s: %(message)s"
+
+ COLORS = {
+ 'WARNING': CYELLOW,
+ 'INFO': CGREEN,
+ 'ERROR': CRED
+ }
+
+ def __init__(self, flush=True):
+ fmt = self.FORMAT_FLUSH if flush else self.FORMAT_NOFLUSH
+ logging.Formatter.__init__(self, fmt=fmt, datefmt='%Y/%m/%d %H:%M:%S')
+
+ def format(self, record):
+ levelname = record.levelname
+ if levelname in self.COLORS:
+ levelname_color = self.COLORS[levelname] + levelname + CEND
+ record.levelname = levelname_color
+ return logging.Formatter.format(self, record)
+
+
+def now(form="%Y/%m/%d %H:%M:%S"):
+ return datetime.now().strftime(form)
+
+
+@contextmanager
+def manage_handler(_type, keep=True):
+ """
+ Define a contextmanager to remove specific logging handler temporarily
+ """
+ try:
+ handler = get_handler(logger, _type)
+ if not keep:
+ logger.removeHandler(handler)
+ yield
+ finally:
+ if not keep:
+ logger.addHandler(handler)
+
+
+def msg_raw(level, msg, to_stdout=True):
+ with manage_handler("console", to_stdout):
+ logger.log(level, msg)
+
+
+def msg_info(msg, to_stdout=True):
+ msg_raw(logging.INFO, msg, to_stdout)
+
+
+def msg_warn(msg, to_stdout=True):
+ msg_raw(logging.WARNING, msg, to_stdout)
+
+
+def msg_error(msg, to_stdout=True):
+ msg_raw(logging.ERROR, msg, to_stdout)
+
+
+def json_dumps():
+ """
+ Dump the json results to file
+ """
+ from . import main
+ with open(main.ctx.jsonfile, 'w') as f:
+ f.write(json.dumps(main.ctx.task_list, indent=2))
+ f.flush()
+ os.fsync(f)
+
+
+class FenceInfo(object):
+ """
+ Class to collect fence info
+ """
+ @property
+ def fence_enabled(self):
+ enable_result = crmshutils.get_property("stonith-enabled")
+ if not enable_result or enable_result.lower() != "true":
+ return False
+ return True
+
+ @property
+ def fence_action(self):
+ action_result = crmshutils.get_property("stonith-action")
+ if action_result is None or action_result not in ["off", "poweroff", "reboot"]:
+ msg_error("Cluster property \"stonith-action\" should be reboot|off|poweroff")
+ return None
+ return action_result
+
+ @property
+ def fence_timeout(self):
+ timeout_result = crmshutils.get_property("stonith-timeout")
+ if timeout_result and re.match(r'[1-9][0-9]*(s|)$', timeout_result):
+ return timeout_result.strip("s")
+ return config.FENCE_TIMEOUT
+
+
+def check_node_status(node, state):
+ """
+ Check whether the node has expected state
+ """
+ rc, stdout, stderr = ShellUtils().get_stdout_stderr('crm_node -l')
+ if rc != 0:
+ msg_error(stderr)
+ return False
+ pattern = re.compile(r'^.* {} {}'.format(node, state), re.MULTILINE)
+ if not pattern.search(stdout):
+ return False
+ return True
+
+
+def online_nodes():
+ """
+ Get online node list
+ """
+ rc, stdout, stderr = ShellUtils().get_stdout_stderr('crm_mon -1')
+ if rc == 0 and stdout:
+ res = re.search(r'Online:\s+\[\s(.*)\s\]', stdout)
+ if res:
+ return res.group(1).split()
+ return []
+
+
+def peer_node_list():
+ """
+ Get online node list except self
+ """
+ online_nodelist = online_nodes()
+ if online_nodelist:
+ online_nodelist.remove(this_node())
+ return online_nodelist
+ return []
+
+
+def this_node():
+ """
+ Try to get the node name from crm_node command
+ If failed, use its hostname
+ """
+ rc, stdout, stderr = ShellUtils().get_stdout_stderr("crm_node --name")
+ if rc != 0:
+ msg_error(stderr)
+ return crmshutils.this_node()
+ return stdout
+
+
+def str_to_datetime(str_time, fmt):
+ return datetime.strptime(str_time, fmt)
+
+
+def corosync_port_list():
+ """
+ Get corosync ports using corosync-cmapctl
+ """
+ ports = []
+ rc, out, _ = ShellUtils().get_stdout_stderr("corosync-cmapctl totem.interface")
+ if rc == 0 and out:
+ ports = re.findall(r'(?:mcastport.*) ([0-9]+)', out)
+ return ports
+
+
+def get_handler(logger, _type):
+ """
+ Get logger specific handler
+ """
+ for h in logger.handlers:
+ if getattr(h, '_name') == _type:
+ return h
+
+
+def is_root():
+ return os.getuid() == 0
+
+
+def get_process_status(s):
+ """
+ Return True if the argument is the name of a running process.
+
+ s: process name
+ returns a boolean and the pid
+ """
+ # find pids of running processes
+ pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
+ for pid in pids:
+ try:
+ pid_file = os.path.join('/proc', pid, 'cmdline')
+ with open(pid_file, 'rb') as f:
+ data = f.read()
+ procname = os.path.basename(crmshutils.to_ascii(data).replace('\x00', ' ').split(' ')[0])
+ if procname == s or procname == s + ':':
+ return True, int(pid)
+ except EnvironmentError:
+ # a process may have died since we got the list of pids
+ pass
+ return False, -1
+
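+# For example (sketch): get_process_status('corosync') returns (True, <pid>)
+# while corosync is running, and (False, -1) otherwise.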
+
+def _find_match_count(str1, str2):
+ """
+ Find the length of the longest common prefix of str1 and str2
+ """
+ leng = min(len(str1), len(str2))
+ num = 0
+
+ for i in range(leng):
+ if str1[i] == str2[i]:
+ num += 1
+ else:
+ break
+
+ return num
+
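+# For example (sketch): _find_match_count('/dev/sda1', '/dev/sda2') returns 8
+# (the shared prefix '/dev/sda'), while '/dev/sda1' vs '/dev/sdb1' returns 7.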
+
+def is_valid_sbd(dev):
+ """
+ Check whether the device is an initialized SBD device
+
+ dev: device path
+ return True if 'dev' is an initialized SBD device
+ """
+ if not os.path.exists(dev):
+ return False
+
+ rc, out, err = ShellUtils().get_stdout_stderr(config.SBD_CHECK_CMD.format(dev=dev))
+ if rc != 0 and err:
+ msg_error(err)
+ return False
+
+ return True
+
+
+def find_candidate_sbd(dev):
+ """
+ Find a device that already has an SBD header
+
+ return the path of the candidate SBD device
+ """
+ ddir = os.path.dirname(dev)
+ dname = os.path.basename(dev)
+
+ dev_list = glob.glob(ddir + "/*")
+ if len(dev_list) == 0:
+ return ""
+
+ can_filter = filter(is_valid_sbd, dev_list)
+ candidates = list(can_filter)
+
+ if len(candidates) == 0:
+ return ""
+
+ index = 0
+ i = 0
+ max_match = -1
+ num_list = map(_find_match_count, [dev] * len(candidates), candidates)
+ for num in num_list:
+ if num > max_match:
+ max_match = num
+ index = i
+ i += 1
+
+ return candidates[index]
+
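+# Sketch (illustrative, hypothetical devices): with SBD_DEVICE=/dev/sdc1 not
+# usable but /dev/sdb1 carrying a valid SBD header, find_candidate_sbd('/dev/sdc1')
+# scans /dev/* for devices with SBD headers and returns the one sharing the
+# longest name prefix with the configured device.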
+
+def warning_ask(warn_string):
+ from . import main
+ if main.ctx.force:
+ return False
+
+ try:
+ ans = input(CYELLOW + warn_string + CEND)
+ except EOFError:
+ return False
+ return True if ans == "Yes" else False
diff --git a/crmsh/crm_gv.py b/crmsh/crm_gv.py
new file mode 100644
index 0000000..c1ed3b1
--- /dev/null
+++ b/crmsh/crm_gv.py
@@ -0,0 +1,238 @@
+# Copyright (C) 2013 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+import re
+from . import config
+from . import tmpfiles
+from . import utils
+from .ordereddict import odict
+from . import log
+
+
+logger = log.setup_logger(__name__)
+# graphviz stuff
+
+
+def _attr_str(attr_d):
+ return ','.join(['%s="%s"' % (k, v)
+ for k, v in attr_d.items()])
+
+
+def _quoted(name):
+ if re.match('^[0-9_]', name):
+ return '"%s"' % (name)
+ return name
+
+
+class Gv(object):
+ '''
+ graph.
+ '''
+ EDGEOP = '' # actually defined in subclasses
+
+ def __init__(self, ident=None):
+ if ident:
+ self.ident = self.gv_id(ident)
+ else:
+ self.ident = ""
+ self.nodes = {}
+ self.edges = []
+ self.subgraphs = []
+ self.node_attrs = odict()
+ self.attrs = odict()
+ self.graph_attrs = odict()
+ self.edge_attrs = []
+ self.top_nodes = []
+ self.norank_nodes = []
+
+ def gv_id(self, n):
+ return n.replace('-', '_').replace('.', '_')
+
+ def new_graph_attr(self, attr, v):
+ self.graph_attrs[attr] = v
+
+ def new_attr(self, n, attr_n, attr_v):
+ ident = self.gv_id(n)
+ if ident not in self.attrs:
+ self.attrs[ident] = odict()
+ self.attrs[ident][attr_n] = attr_v
+
+ def new_node(self, n, top_node=False, norank=False):
+ '''
+ Register every node.
+ '''
+ ident = self.gv_id(n)
+ if top_node:
+ self.top_nodes.append(ident)
+ elif ident not in self.nodes:
+ self.nodes[ident] = 0
+ if norank:
+ self.norank_nodes.append(ident)
+
+ def my_edge(self, e):
+ return [self.gv_id(x) for x in e if x is not None]
+
+ def new_edge(self, e):
+ ne = self.my_edge(e)
+ for i, node in enumerate(ne):
+ if i == 0:
+ continue
+ if node in self.top_nodes:
+ continue
+ self.nodes[node] = i
+ self.edges.append(ne)
+ self.edge_attrs.append(odict())
+ return len(self.edges)-1
+
+ def new_edge_attr(self, e_id, attr_n, attr_v):
+ if e_id >= len(self.edge_attrs):
+ return # if the caller didn't create an edge beforehand
+ self.edge_attrs[e_id][attr_n] = attr_v
+
+ def edge_str(self, e_id):
+ e_s = self.EDGEOP.join(_quoted(x) for x in self.edges[e_id])
+ if e_id < len(self.edge_attrs):
+ return '%s [%s]' % (e_s, _attr_str(self.edge_attrs[e_id]))
+ else:
+ return e_s
+
+ def invis_edge_str(self, tn, node):
+ attrs = 'style="invis"'
+ if node in self.norank_nodes:
+ attrs = '%s,constraint="false"' % attrs
+ return '%s [%s];' % (self.EDGEOP.join([_quoted(tn), _quoted(node)]), attrs)
+
+ def invisible_edges(self):
+ '''
+ Dump invisible edges from top_nodes to every node which
+ is at the top of the edge or not in any other edge. This
+ seems to be the only way to keep the nodes (as in cluster
+ nodes) above resources.
+ NB: This is O(n^2) (nodes times resources).
+ '''
+ l = []
+ for tn in self.top_nodes:
+ for node, rank in self.nodes.items():
+ if rank > 0:
+ continue
+ l.append('\t%s' % self.invis_edge_str(tn, node))
+ return l
+
+ def header(self):
+ return ''
+
+ def footer(self):
+ return ''
+
+ def repr(self):
+ '''
+ Dump gv graph to a string.
+ '''
+ l = []
+ l.append(self.header())
+ if self.node_attrs:
+ l.append('\tnode [%s];' % _attr_str(self.node_attrs))
+ for attr, v in self.graph_attrs.items():
+ l.append('\t%s="%s";' % (attr, v))
+ for sg in self.subgraphs:
+ l.append('\t%s' % '\n\t'.join(sg.repr()))
+ for e_id in range(len(self.edges)):
+ l.append('\t%s;' % self.edge_str(e_id))
+ for n, attr_d in self.attrs.items():
+ attr_s = _attr_str(attr_d)
+ l.append('\t%s [%s];' % (_quoted(n), attr_s))
+ l += self.invisible_edges()
+ l.append(self.footer())
+ return l
+
+ def totmpf(self):
+ return utils.str2tmp('\n'.join(self.repr()))
+
+ def save(self, outf):
+ f = utils.safe_open_w(outf)
+ if not f:
+ return False
+ f.write('\n'.join(self.repr()))
+ f.write('\n')
+ utils.safe_close_w(f)
+ return True
+
+
+class GvDot(Gv):
+ '''
+ graphviz dot directed graph.
+ '''
+ EDGEOP = ' -> '
+
+ def __init__(self, ident=None):
+ Gv.__init__(self, ident)
+
+ def header(self):
+ name = self.ident or "G"
+ return 'digraph %s {\n' % (name)
+
+ def footer(self):
+ return '}'
+
+ def group(self, members, ident=None):
+ '''
+ Groups are subgraphs.
+ '''
+ sg_obj = SubgraphDot(ident)
+ sg_obj.new_edge(members)
+ self.subgraphs.append(sg_obj)
+ self.new_node(members[0])
+ return sg_obj
+
+ def optional_set(self, members, ident=None):
+ '''
+ Optional resource sets.
+ '''
+ sg_obj = SubgraphDot(ident)
+ e_id = sg_obj.new_edge(members)
+ sg_obj.new_edge_attr(e_id, 'style', 'invis')
+ sg_obj.new_edge_attr(e_id, 'constraint', 'false')
+ self.subgraphs.append(sg_obj)
+ return sg_obj
+
+ def display(self):
+ if not config.core.dotty:
+ logger.error("dotty not found")
+ return False
+ dotf = self.totmpf()
+ if not dotf:
+ return False
+ utils.show_dot_graph(dotf, desc="configuration graph")
+ return True
+
+ def image(self, img_type, outf):
+ if not config.core.dot:
+ logger.error("dot not found")
+ return False
+ dotf = self.totmpf()
+ if not dotf:
+ return False
+ tmpfiles.add(dotf)
+ return (utils.ext_cmd_nosudo("%s -T%s -o%s %s" %
+ (config.core.dot, img_type, outf, dotf)) == 0)
+
+
+class SubgraphDot(GvDot):
+ '''
+ graphviz subgraph.
+ '''
+ def __init__(self, ident=None):
+ GvDot.__init__(self, ident)
+
+ def header(self):
+ if self.ident:
+ return 'subgraph %s {' % self.ident
+ else:
+ return '{'
+
+
+gv_types = {
+ "dot": GvDot,
+}
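+
+# A minimal usage sketch (illustrative):
+#
+#   gv = gv_types["dot"]("example")
+#   gv.new_node("node1", top_node=True)
+#   e_id = gv.new_edge(["node1", "rsc1"])
+#   gv.new_edge_attr(e_id, "color", "red")
+#   print('\n'.join(gv.repr()))    # emits "digraph example { ... }"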
+
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/crm_pssh.py b/crmsh/crm_pssh.py
new file mode 100644
index 0000000..d8acc74
--- /dev/null
+++ b/crmsh/crm_pssh.py
@@ -0,0 +1,165 @@
+# Modified pssh
+# Copyright (c) 2011, Dejan Muhamedagic
+# Copyright (c) 2009, Andrew McNabb
+# Copyright (c) 2003-2008, Brent N. Chun
+
+"""Parallel ssh to the set of nodes in hosts.txt.
+
+For each node, this essentially does an "ssh host -l user prog [arg0] [arg1]
+...". The -o option can be used to store stdout from each remote node in a
+directory. Each output file in that directory will be named by the
+corresponding remote node's hostname or IP address.
+"""
+
+import os
+import glob
+import typing
+
+from . import config
+from . import log
+from .prun import prun
+
+logger = log.setup_logger(__name__)
+
+
+_DEFAULT_TIMEOUT = 60
+_EC_LOGROT = 120
+
+
+def get_output(odir, host):
+ '''
+ Looks for the output returned by the given host.
+ This is somewhat problematic, since it is possible that
+ different hosts can have similar hostnames. For example naming
+ hosts "host.1" and "host.2" will confuse this code.
+ '''
+ l = []
+ for fname in ["%s/%s" % (odir, host)] + glob.glob("%s/%s.[0-9]*" % (odir, host)):
+ try:
+ if os.path.isfile(fname):
+ with open(fname) as f:
+ l += f.readlines()
+ except IOError:
+ continue
+ return l
+
+
+def show_output(odir, hosts, desc):
+ '''
+ Display output from hosts. See get_output for caveats.
+ '''
+ for host in hosts:
+ out_l = get_output(odir, host)
+ if out_l:
+ print("%s %s:" % (host, desc))
+ print(''.join(out_l))
+
+
+def do_pssh(host_cmdline: typing.Sequence[typing.Tuple[str, str]], outdir, errdir, timeout_seconds=_DEFAULT_TIMEOUT):
+ if outdir:
+ os.makedirs(outdir, exist_ok=True)
+ if errdir:
+ os.makedirs(errdir, exist_ok=True)
+
+ class StdoutStderrInterceptor(prun.PRunInterceptor):
+ def __init__(self):
+ self._task_count = -1
+ self._stdout_path = None
+ self._stderr_path = None
+
+ def task(self, task: prun.Task) -> prun.Task:
+ self._task_count += 1
+ if outdir:
+ path = f'{outdir}/{task.context["host"]}.{self._task_count}'
+ task.stdout = prun.Task.RedirectToFile(path)
+ self._stdout_path = path
+ if errdir:
+ path = f'{errdir}/{task.context["host"]}.{self._task_count}'
+ task.stderr = prun.Task.RedirectToFile(path)
+ self._stderr_path = path
+ return task
+
+ def result(self, result: prun.ProcessResult) -> prun.ProcessResult:
+ result.stdout_path = self._stdout_path
+ result.stderr_path = self._stderr_path
+ return result
+
+ return prun.prun_multimap(host_cmdline, timeout_seconds=timeout_seconds, interceptor=StdoutStderrInterceptor())
+
+
+def examine_outcome(
+ results: typing.Sequence[typing.Tuple[str, typing.Union[prun.ProcessResult, prun.SSHError]]],
+ errdir: str,
+):
+ '''
+ A custom function to show stderr in case there were issues.
+ Not suited for callers who want better control of output or
+ per-host processing.
+ '''
+ if any(isinstance(result, prun.SSHError) for host, result in results):
+ logger.warning("ssh processes failed")
+ show_output(errdir, [host for host, result in results], "stderr")
+ return False
+ elif any((0 > result.returncode for host, result in results)):
+ # At least one process was killed.
+ logger.error("ssh process was killed")
+ show_output(errdir, [host for host, result in results], "stderr")
+ return False
+ elif any(0 != result.returncode and _EC_LOGROT != result.returncode for host, result in results):
+ logger.warning("some ssh processes failed")
+ show_output(errdir, [host for host, result in results], "stderr")
+ return False
+ return True
+
+
+def next_loglines(a, outdir, errdir, from_time):
+ '''
+ pssh to nodes to collect new logs.
+ '''
+ l = []
+ for node, rptlog, logfile, nextpos in a:
+ logger.debug("updating %s from %s (pos %d)", logfile, node, nextpos)
+ if logfile.startswith("/tmp") and logfile.endswith("/journal.log"):
+ cmdline = "/usr/bin/journalctl -o short-iso --since '%s' --no-pager" % (from_time)
+ else:
+ cmdline = "perl -e 'exit(%d) if (stat(\"%s\"))[7]<%d' && tail -c +%d %s" % (
+ _EC_LOGROT, logfile, nextpos-1, nextpos, logfile)
+ l.append([node, cmdline])
+ results = do_pssh(l, outdir, errdir)
+ if results:
+ return examine_outcome(results, errdir)
+ else:
+ return False
+
+
+def next_peinputs(node_pe_l, outdir, errdir):
+ '''
+ pssh to nodes to collect new PE inputs.
+ '''
+ pe_dir = config.path.pe_state_dir
+ vardir = os.path.dirname(pe_dir)
+ l = []
+ for node, pe_l in node_pe_l:
+ red_pe_l = [os.path.join("pengine", os.path.basename(x)) for x in pe_l]
+ cmdline = "tar -C %s -chf - %s" % (vardir, ' '.join(red_pe_l))
+ logger.debug("getting new PE inputs %s from %s", red_pe_l, node)
+ l.append([node, cmdline])
+ if not l:
+ # is this a failure?
+ return True
+ results = do_pssh(l, outdir, errdir)
+ return examine_outcome(results, errdir)
+
+
+def do_pssh_cmd(cmd, node_l, outdir, errdir, timeout=20000):
+ '''
+ pssh to nodes and run cmd.
+ '''
+ l = []
+ for node in node_l:
+ l.append([node, cmd])
+ if not l:
+ return True
+ return do_pssh(l, outdir, errdir, timeout // 1000)
+
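+
+# Usage sketch (illustrative): run a command on several nodes, capturing
+# per-host stdout/stderr under the given directories.
+#
+#   results = do_pssh_cmd("crm_mon -1", ["node1", "node2"], "/tmp/out", "/tmp/err")
+#   ok = examine_outcome(results, "/tmp/err")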
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/handles.py b/crmsh/handles.py
new file mode 100644
index 0000000..e43f415
--- /dev/null
+++ b/crmsh/handles.py
@@ -0,0 +1,127 @@
+# Copyright (C) 2015 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import re
+
+
+headmatcher = re.compile(r'\{\{(\#|\^)?([A-Za-z0-9\#\$:_-]+)\}\}')
+
+
+class value(object):
+ """
+ An object that is indexable in mustaches,
+ but also evaluates to a value itself.
+ """
+ def __init__(self, obj, value):
+ self.value = value
+ self.obj = obj
+ self.get = obj.get
+
+ def __call__(self):
+ return self.value
+
+ def __repr__(self):
+ return "handles.value(%s, %s)" % (repr(self.obj), repr(self.value))
+
+ def __str__(self):
+ return "handles.value(%s, %s)" % (repr(self.obj), repr(self.value))
+
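+# Illustrative: v = value({"a": 1}, "x") evaluates to "x" when called (v()),
+# while staying indexable through v.get("a") -> 1.
+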
+
+def _join(d1, d2):
+ d = d1.copy()
+ d.update(d2)
+ return d
+
+
+def _resolve(path, context, strict):
+ for values in context:
+ r = path
+ p = values
+ while r and p is not None:
+ p, r = p.get(r[0]), r[1:]
+ if strict and r:
+ continue
+ if callable(p):
+ p = p()
+ if p is not None:
+ return p
+ if strict:
+ raise ValueError("Not set: %s" % (':'.join(path)))
+ return None
+
+
+def _push(path, value, context):
+ # build a nested dict along 'path' and push it onto the context stack
+ root = {}
+ leaf = root
+ for x in path[:-1]:
+ leaf[x] = {}
+ leaf = leaf[x]
+ leaf[path[-1]] = value
+ return [root] + context
+
+
+def _textify(obj):
+ if obj is None:
+ return ''
+ elif obj is True:
+ return 'true'
+ elif obj is False:
+ return 'false'
+ return str(obj)
+
+
+def _parse(template, context, strict):
+ ret = ""
+ while template:
+ head = headmatcher.search(template)
+ if head is None:
+ ret += template
+ break
+ istart, iend, prefix, key = head.start(0), head.end(0), head.group(1), head.group(2)
+ if istart > 0:
+ ret += template[:istart]
+ path, block, invert = key.split(':'), prefix == '#', prefix == '^'
+ if not path:
+ raise ValueError("empty {{}} block found")
+ obj = _resolve(path, context, strict)
+ if block or invert:
+ tailtag = '{{/%s}}' % (key)
+ tailidx = iend + template[head.end(0):].find(tailtag)
+ if tailidx < iend:
+ raise ValueError("Unclosed conditional: %s" % head.group(0))
+ iend = tailidx + len(tailtag)
+ body = template[head.end(0):tailidx]
+ if body.startswith('\n') and (not ret or ret.endswith('\n')):
+ ret = ret[:-1]
+ if block:
+ if obj in (None, False):
+ pass
+ elif isinstance(obj, (tuple, list)):
+ for it in obj:
+ ret += _parse(body, _push(path, it, context), strict)
+ else:
+ ret += _parse(body, context, strict)
+ elif not obj:
+ ret += _parse(body, _push(path, "", context), strict)
+ if ret.endswith('\n') and template[iend:].startswith('\n'):
+ iend += 1
+ elif obj is not None:
+ ret += _textify(obj)
+ template = template[iend:]
+ return ret
+
+
+def parse(template, values, strict=False):
+ """
+ Takes as input a template string and a dict
+ of values, and replaces the following:
+ {{object:key}} = look up key in object and insert value
+ {{object}} = insert value if not None or False.
+ {{#object}} ... {{/object}} = if object is a dict or value, process text. if object
+ is a list, process text for each item in the list
+ (can't nest these for items with the same name)
+ {{^object}} ... {{/object}} = if object is falsy, process text.
+ If a path evaluates to a callable, the callable will be invoked to get the value.
+ """
+ return _parse(template, [values], strict)
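+
+
+# A few illustrative expansions (not part of the module API):
+#
+#   parse("Hello {{name}}!", {"name": "world"})            # -> 'Hello world!'
+#   parse("{{#items}}{{items}} {{/items}}", {"items": ["a", "b"]})  # -> 'a b '
+#   parse("{{^missing}}fallback{{/missing}}", {})          # -> 'fallback'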
diff --git a/crmsh/healthcheck.py b/crmsh/healthcheck.py
new file mode 100644
index 0000000..eaa63fe
--- /dev/null
+++ b/crmsh/healthcheck.py
@@ -0,0 +1,234 @@
+import logging
+import argparse
+import os
+import os.path
+import subprocess
+import sys
+import typing
+
+import crmsh.parallax
+import crmsh.utils
+
+
+logger = logging.getLogger(__name__)
+
+
+class Feature:
+ _feature_registry = dict()
+
+ def __init_subclass__(cls, **kwargs):
+ super().__init_subclass__(**kwargs)
+ Feature._feature_registry[cls.__name__.rsplit('.', 1)[-1]] = cls
+
+ @staticmethod
+ def get_feature_by_name(name: str):
+ return Feature._feature_registry[name]
+
+ def check_quick(self) -> bool:
+ raise NotImplementedError
+
+ def check_local(self, nodes: typing.Iterable[str]) -> bool:
+ """Check whether the feature is functional on local node."""
+ raise NotImplementedError
+
+ def check_cluster(self, nodes: typing.Iterable[str]) -> bool:
+ """Check whether the feature is functional on the cluster."""
+ raise NotImplementedError
+
+ def fix_local(self, nodes: typing.Iterable[str], ask: typing.Callable[[str], bool]) -> None:
+ """Fix the feature on local node.
+
+ At least one of fix_local and fix_cluster should be implemented. If fix_local is not implemented, this method
+ will be run on each node.
+ """
+ raise NotImplementedError
+
+ def fix_cluster(self, nodes: typing.Iterable[str], ask: typing.Callable[[str], bool]) -> None:
+ """Fix the feature on the cluster.
+
+ At least one of fix_local and fix_cluster should be implemented. If this method is not implemented, fix_local
+ will be run on each node.
+ """
+ raise NotImplementedError
+
+
+class FixFailure(Exception):
+ pass
+
+
+class AskDeniedByUser(Exception):
+ pass
+
+
+def feature_quick_check(feature: Feature):
+ return feature.check_quick()
+
+
+def feature_local_check(feature: Feature, nodes: typing.Iterable[str]):
+ try:
+ if not feature.check_quick():
+ return False
+ except NotImplementedError:
+ pass
+ return feature.check_local(nodes)
+
+
+def feature_full_check(feature: Feature, nodes: typing.Iterable[str]) -> bool:
+ try:
+ if not feature.check_quick():
+ return False
+ except NotImplementedError:
+ pass
+ try:
+ if not feature.check_local(nodes):
+ return False
+ except NotImplementedError:
+ pass
+ try:
+ return feature.check_cluster(nodes)
+ except NotImplementedError:
+ results = crmsh.parallax.parallax_run(
+ nodes,
+ '/usr/bin/env python3 -m crmsh.healthcheck check-local {}'.format(
+ feature.__class__.__name__.rsplit('.', 1)[-1],
+ )
+ )
+ return all(rc == 0 for rc, _, _ in results.values())
+
+
+def feature_fix(feature: Feature, nodes: typing.Iterable[str], ask: typing.Callable[[str], bool]) -> None:
+ try:
+ return feature.fix_cluster(nodes, ask)
+ except NotImplementedError:
+ results = crmsh.parallax.parallax_run(
+ nodes,
+ '/usr/bin/env python3 -m crmsh.healthcheck fix-local {}'.format(
+ feature.__class__.__name__.rsplit('.', 1)[-1],
+ )
+ )
+ if any(rc != 0 for rc, _, _ in results.values()):
+ raise FixFailure
+
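+# Usage sketch (illustrative):
+#
+#   feature = Feature.get_feature_by_name('PasswordlessHaclusterAuthenticationFeature')()
+#   nodes = crmsh.utils.list_cluster_nodes()
+#   if not feature_full_check(feature, nodes):
+#       feature_fix(feature, nodes, ask=lambda msg: True)
+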
+
+class PasswordlessHaclusterAuthenticationFeature(Feature):
+ SSH_DIR = os.path.expanduser('~hacluster/.ssh')
+ KEY_TYPES = ['ed25519', 'ecdsa', 'rsa']
+
+ def __str__(self):
+ return "Configure Passwordless for hacluster"
+
+ def check_quick(self) -> bool:
+ for key_type in self.KEY_TYPES:
+ try:
+ os.stat('{}/id_{}'.format(self.SSH_DIR, key_type))
+ os.stat('{}/id_{}.pub'.format(self.SSH_DIR, key_type))
+ return True
+ except FileNotFoundError:
+ pass
+ return False
+
+ def check_local(self, nodes: typing.Iterable[str]) -> bool:
+ try:
+ for node in nodes:
+ subprocess.check_call(
+ ['sudo', 'su', '-', 'hacluster', '-c', 'ssh hacluster@{} true'.format(node)],
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+ return True
+ except subprocess.CalledProcessError:
+ return False
+
+ def fix_cluster(self, nodes: typing.Iterable[str], ask: typing.Callable[[str], None]) -> None:
+ import crmsh.bootstrap # import bootstrap lazily here to avoid circular dependency
+ logger.debug("setup passwordless ssh authentication for user hacluster")
+ local_node = crmsh.utils.this_node()
+ remote_nodes = set(nodes)
+ remote_nodes.remove(local_node)
+ remote_nodes = list(remote_nodes)
+ crmsh.parallax.parallax_run(
+ nodes,
+ 'chown hacluster: ~hacluster/.ssh/authorized_keys && chmod 0600 ~hacluster/.ssh/authorized_keys',
+ )
+ crmsh.bootstrap.configure_ssh_key('hacluster')
+ crmsh.bootstrap.swap_key_for_hacluster(remote_nodes)
+ for node in remote_nodes:
+ crmsh.bootstrap.change_user_shell('hacluster', node)
+
+
+def main_check_local(args) -> int:
+ try:
+ feature = Feature.get_feature_by_name(args.feature)()
+ nodes = crmsh.utils.list_cluster_nodes(no_reg=True)
+ if nodes:
+ if feature_local_check(feature, nodes):
+ return 0
+ else:
+ return 1
+ except KeyError:
+ logger.error('No such feature: %s.', args.feature)
+ return 2
+
+
+def main_fix_local(args) -> int:
+ try:
+ feature = Feature.get_feature_by_name(args.feature)()
+ nodes = crmsh.utils.list_cluster_nodes(no_reg=True)
+ if nodes:
+ if args.yes:
+ def ask(msg): return True
+ else:
+ def ask(msg): return crmsh.utils.ask('Healthcheck: fix: ' + msg, background_wait=False)
+ if args.without_check or not feature_local_check(feature, nodes):
+ feature.fix_local(nodes, ask)
+ return 0
+ except KeyError:
+ logger.error('No such feature: %s.', args.feature)
+ return 2
+
+
+def main_fix_cluster(args) -> int:
+ try:
+ feature = Feature.get_feature_by_name(args.feature)()
+ nodes = crmsh.utils.list_cluster_nodes(no_reg=True)
+ if nodes:
+ if args.yes:
+ def ask(msg): return True
+ else:
+ def ask(msg): return crmsh.utils.ask('Healthcheck: fix: ' + msg, background_wait=False)
+ if args.without_check or not feature_full_check(feature, nodes):
+ feature_fix(feature, nodes, ask)
+ return 0
+ except KeyError:
+ logger.error('No such feature: %s.', args.feature)
+ return 2
+
+
+def main() -> int:
+ # This entrance is for internal programmatic use only.
+ parser = argparse.ArgumentParser()
+ subparsers = parser.add_subparsers()
+
+ check_local_parser = subparsers.add_parser('check-local')
+ check_local_parser.add_argument('feature')
+ check_local_parser.set_defaults(func=main_check_local)
+
+ fix_local_parser = subparsers.add_parser('fix-local')
+ fix_local_parser.add_argument('--yes', action='store_true')
+ fix_local_parser.add_argument('--without-check', action='store_true')
+ fix_local_parser.add_argument('feature')
+ fix_local_parser.set_defaults(func=main_fix_local)
+
+ fix_cluster_parser = subparsers.add_parser('fix-cluster')
+ fix_cluster_parser.add_argument('--yes', action='store_true')
+ fix_cluster_parser.add_argument('--without-check', action='store_true')
+ fix_cluster_parser.add_argument('feature')
+ fix_cluster_parser.set_defaults(func=main_fix_cluster)
+
+ args = parser.parse_args()
+ return args.func(args)
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/crmsh/help.py b/crmsh/help.py
new file mode 100644
index 0000000..5ac08fd
--- /dev/null
+++ b/crmsh/help.py
@@ -0,0 +1,449 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+'''
+The commands exposed by this module all
+get their data from the doc/crm.8.adoc text
+file. That file contains help for
+ - topics
+ - levels
+ - commands in levels
+
+The help file is lazily loaded when the first
+request for help is made.
+
+All help is in the following form in the manual:
+[[cmdhelp_<level>_<cmd>,<short help text>]]
+=== ...
+Long help text.
+...
+[[cmdhelp_<level>_<cmd>,<short help text>]]
+
+Help for the level itself is like this:
+
+[[cmdhelp_<level>,<short help text>]]
+'''
+
+import os
+import re
+from .sh import ShellUtils
+from .utils import page_string
+from . import config
+from . import clidisplay
+from .ordereddict import odict
+from . import log
+
+
+logger = log.setup_logger(__name__)
+
+
+class HelpFilter(object):
+ _B0 = re.compile(r'^\.{4,}')
+ _B1 = re.compile(r'^\*{4,}')
+ _QUOTED = re.compile(r'`([^`]+)`')
+ _MONO = re.compile(r'\+([^+]+)\+')
+ _TOPIC = re.compile(r'(.*)::$')
+ _TOPIC2 = re.compile(r'^\.\w+')
+
+ def __init__(self):
+ self.in_block = False
+
+ def _filter(self, line):
+ block_edge = self._B0.match(line) or self._B1.match(line)
+ if block_edge and not self.in_block:
+ self.in_block = True
+ return ''
+ elif block_edge and self.in_block:
+ self.in_block = False
+ return ''
+ elif not self.in_block:
+ if self._TOPIC2.match(line):
+ return clidisplay.help_topic(line[1:])
+ line = self._QUOTED.sub(clidisplay.help_keyword(r'\1'), line)
+ line = self._MONO.sub(clidisplay.help_block(r'\1'), line)
+ line = self._TOPIC.sub(clidisplay.help_topic(r'\1'), line)
+ return line
+ else:
+ return clidisplay.help_block(line)
+
+ def __call__(self, text):
+ return '\n'.join([self._filter(line) for line in text.splitlines()]) + '\n'
+
+
+class HelpEntry(object):
+ def __init__(self, short_help, long_help='', alias_for=None, generated=False):
+ if short_help:
+ self.short = short_help[0].upper() + short_help[1:]
+ else:
+ self.short = 'Help'
+ self.long = long_help
+ self.alias_for = alias_for
+ self.generated = generated
+ self.from_cli = False
+ self.level = False
+ self.name = False
+
+ def is_alias(self):
+ return self.alias_for is not None
+
+ def paginate(self):
+ '''
+ Display help, paginated.
+ Replace asciidoc syntax with colorized output where possible.
+ '''
+ helpfilter = HelpFilter()
+
+ short_help = clidisplay.help_header(self.short)
+ if self.from_cli and self.level and self.name:
+ level = '' if self.level == 'root' else self.level
+ _, output, _ = ShellUtils().get_stdout_stderr(f"crm {level} {self.name} --help-without-redirect")
+ page_string(short_help + '\n\n'+ output)
+ return
+
+ long_help = self.long
+ if long_help:
+ long_help = helpfilter(long_help)
+ if not long_help.startswith('\n'):
+ long_help = '\n' + long_help
+
+ prefix = ''
+ if self.is_alias():
+ prefix = helpfilter("(Redirected from `%s` to `%s`)\n" % self.alias_for)
+
+ page_string(short_help + '\n' + prefix + long_help)
+
+ def set_long_help(self, long_help):
+ self.long = long_help
+
+ def set_long_lazy_load_source(self, level, name, from_cli):
+ self.level = level
+ self.name = name
+ self.from_cli = from_cli
+
+ def __str__(self):
+ if self.long:
+ return self.short + '\n' + self.long
+ return self.short
+
+ def __repr__(self):
+ return str(self)
+
+
+HELP_FILE = os.path.join(config.path.sharedir, 'crm.8.adoc')
+
+_DEFAULT = HelpEntry('No help available', long_help='', alias_for=None, generated=True)
+_REFERENCE_RE = re.compile(r'<<[^,]+,(.+)>>')
+
+# loaded on demand
+# _LOADED is set to True when an attempt
+# has been made (so it won't be tried again)
+_LOADED = False
+_TOPICS = odict()
+_LEVELS = odict()
+_COMMANDS = odict()
+
+_TOPICS["Overview"] = HelpEntry("Available help topics and commands", generated=True)
+_TOPICS["Topics"] = HelpEntry("Available help topics", generated=True)
+
+
+def _titleline(title, desc, suffix='', width=16):
+ return '%-*s %s\n' % (width, ('`%s`' % (title)) + suffix, desc)
+
+
+_hidden_commands = ('up', 'cd', 'help', 'quit', 'ls')
+
+
+def get_max_width(dict_):
+ max_width = 16
+ for key in list(dict_.keys()):
+ if max_width < len(key):
+ max_width = len(key)
+ if max_width >= 16:
+ max_width += 2
+ return max_width
+
+
+def help_overview():
+ '''
+ Returns an overview of all available
+ topics and commands.
+ '''
+ _load_help()
+ s = "Available topics:\n\n"
+ max_width = get_max_width(_TOPICS)
+ for title, topic in _TOPICS.items():
+ s += '\t' + _titleline(title, topic.short, width=max_width)
+ s += "\n"
+ s += "Available commands:\n\n"
+
+ max_width = get_max_width(_COMMANDS.get('root', {}))
+ for title, command in _COMMANDS.get('root', {}).items():
+ if not command.is_alias():
+ s += '\t' + _titleline(title, command.short, width=max_width)
+ s += "\n"
+
+ max_width_1 = get_max_width(_LEVELS)
+ for title, level in sorted(iter(_LEVELS.items()), key=lambda x: x[0]):
+ if title != 'root' and title in _COMMANDS:
+ s += '\t' + _titleline(title, level.short, suffix='/', width=max_width_1)
+ max_width_2 = get_max_width(_COMMANDS[title])
+ for cmdname, cmd in sorted(iter(_COMMANDS[title].items()), key=lambda x: x[0]):
+ if cmdname in _hidden_commands or cmdname.startswith('_'):
+ continue
+ if not cmd.is_alias():
+ s += '\t\t' + _titleline(cmdname, cmd.short, width=max_width_2)
+ s += "\n"
+ return HelpEntry('Help overview for crmsh\n', s, generated=True)
+
+
+def help_topics():
+ '''
+ Returns an overview of all available
+ topics.
+ '''
+ _load_help()
+ s = ''
+ max_width = get_max_width(_TOPICS)
+ for title, topic in _TOPICS.items():
+ s += '\t' + _titleline(title, topic.short, width=max_width)
+ return HelpEntry('Available topics\n', s, generated=True)
+
+
+def list_help_topics():
+ _load_help()
+ return list(_TOPICS.keys())
+
+
+def help_topic(topic):
+ '''
+ Returns a help entry for a given topic.
+ '''
+ _load_help()
+ return _TOPICS.get(topic, _DEFAULT)
+
+
+def help_level(level):
+ '''
+ Returns a help entry for a given level.
+ '''
+ _load_help()
+ from .command import fuzzy_get
+ return fuzzy_get(_LEVELS, level) or _DEFAULT
+
+
+def help_command(level, command):
+ '''
+ Returns a help entry for a given command
+ '''
+ _load_help()
+ from .command import fuzzy_get
+ lvlhelp = fuzzy_get(_COMMANDS, level)
+ if not lvlhelp:
+ raise ValueError("Undocumented topic '%s'" % (level))
+ cmdhelp = fuzzy_get(lvlhelp, command)
+ if not cmdhelp:
+ raise ValueError("Undocumented topic '%s' in '%s'" % (command, level))
+ return cmdhelp
+
+
+def _is_help_topic(arg):
+ return arg and arg[0].isupper()
+
+
+def _is_command(level, command):
+ from .command import fuzzy_get
+ return level in _COMMANDS and fuzzy_get(_COMMANDS[level], command)
+
+
+def _is_level(level):
+ from .command import fuzzy_get
+ return fuzzy_get(_LEVELS, level)
+
+
+def help_contextual(context, subject, subtopic):
+ """
+ Returns contextual help
+ """
+ _load_help()
+ if subject is None:
+ if context == 'root':
+ return help_overview()
+ return help_level(context)
+ if _is_help_topic(subject):
+ return help_topic(subject)
+ if subtopic is not None:
+ return help_command(subject, subtopic)
+ if _is_command(context, subject):
+ return help_command(context, subject)
+ if _is_level(subject):
+ return help_level(subject)
+ from .command import fuzzy_get
+ t = fuzzy_get(_TOPICS, subject.lower())
+ if t:
+ return t
+ raise ValueError("No help found for '%s'! 'overview' lists all help entries" % (subject))
+
+
+def add_help(entry, topic=None, level=None, command=None):
+ '''
+ Takes a help entry as argument and inserts it into the
+ help system.
+
+ Used to define some help texts statically, for example
+ for 'up' and 'help' itself.
+ '''
+ if topic:
+ if topic not in _TOPICS or _TOPICS[topic] is _DEFAULT:
+ _TOPICS[topic] = entry
+ elif level and command:
+ if level not in _LEVELS:
+ _LEVELS[level] = HelpEntry("No description available", generated=True)
+ if level not in _COMMANDS:
+ _COMMANDS[level] = odict()
+ lvl = _COMMANDS[level]
+ if command not in lvl or lvl[command] is _DEFAULT:
+ lvl[command] = entry
+ elif level:
+ if level not in _LEVELS or _LEVELS[level] is _DEFAULT:
+ _LEVELS[level] = entry
+
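+# Illustrative static registration:
+#   add_help(HelpEntry('Go back to the previous level'), level='root', command='up')
+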
+
+def _load_help():
+ '''
+ Lazily load and parse crm.8.adoc.
+ '''
+ global _LOADED
+ if _LOADED:
+ return
+ _LOADED = True
+
+ def parse_header(line):
+ 'returns a new entry'
+ entry = {'type': '', 'name': '', 'short': '', 'long': '', "from_cli": False}
+ line = line[2:-3] # strip [[ and ]]\n
+ info, short_help = line.split(',', 1)
+ # TODO see https://github.com/ClusterLabs/crmsh/pull/644
+ # This solution has the shortcoming that it drops the adoc content,
+ # which loses the static man page archive
+ if "From Code" in short_help:
+ short_help, _ = short_help.split(',')
+ entry['from_cli'] = True
+ entry['short'] = short_help.strip()
+ info = info.split('_')
+ if info[0] == 'topics':
+ entry['type'] = 'topic'
+ entry['name'] = info[-1]
+ elif info[0] == 'cmdhelp':
+ if len(info) == 2:
+ entry['type'] = 'level'
+ entry['name'] = info[1]
+ elif len(info) >= 3:
+ entry['type'] = 'command'
+ entry['level'] = info[1]
+ entry['name'] = '_'.join(info[2:])
+
+ return entry
+
+ def process(entry):
+ 'writes the entry into topics/levels/commands'
+ short_help = entry['short']
+ long_help = entry['long']
+ if long_help.startswith('=='):
+ long_help = long_help.split('\n', 1)[1]
+ helpobj = HelpEntry(short_help, long_help.rstrip())
+ name = entry['name']
+ if entry['type'] == 'topic':
+ _TOPICS[name] = helpobj
+ elif entry['type'] == 'level':
+ _LEVELS[name] = helpobj
+ elif entry['type'] == 'command':
+ lvl = entry['level']
+ if lvl not in _COMMANDS:
+ _COMMANDS[lvl] = odict()
+ helpobj.set_long_lazy_load_source(entry['level'], entry['name'], entry['from_cli'])
+ _COMMANDS[lvl][name] = helpobj
+
+ def filter_line(line):
+ '''clean up an input line
+ - <<...>> references -> short description
+ '''
+ return _REFERENCE_RE.sub(r'\1', line)
+
+ def append_cmdinfos():
+ "append command information to level descriptions"
+ for lvlname, level in _LEVELS.items():
+ if lvlname in _COMMANDS:
+ level.long += "\n\nCommands:\n"
+ max_width = get_max_width(_COMMANDS[lvlname])
+ for cmdname, cmd in sorted(iter(_COMMANDS[lvlname].items()), key=lambda x: x[0]):
+ if cmdname in _hidden_commands or cmdname.startswith('_'):
+ continue
+ level.long += "\t" + _titleline(cmdname, cmd.short, width=max_width)
+ level.long += "\n"
+ for cmdname, cmd in sorted(iter(_COMMANDS[lvlname].items()), key=lambda x: x[0]):
+ if cmdname in _hidden_commands:
+ level.long += "\t" + _titleline(cmdname, cmd.short, width=max_width)
+
+ def fixup_root_commands():
+ "root commands appear as levels"
+
+ strip_topics = []
+ for tname, topic in _LEVELS.items():
+ if not _COMMANDS.get(tname):
+ strip_topics.append(tname)
+ for t in strip_topics:
+ del _LEVELS[t]
+
+ def fixup_help_aliases():
+ "add help for aliases"
+
+ def add_help_for_alias(lvlname, command, alias):
+ if lvlname not in _COMMANDS:
+ return
+ if command not in _COMMANDS[lvlname]:
+ return
+ if alias in _COMMANDS[lvlname]:
+ return
+ info = _COMMANDS[lvlname][command]
+ _COMMANDS[lvlname][alias] = HelpEntry(info.short, info.long, (alias, command))
+
+ def add_aliases_for_level(lvl):
+ for name, info in lvl.children().items():
+ for alias in info.aliases:
+ add_help_for_alias(lvl.name, info.name, alias)
+ if info.level:
+ add_aliases_for_level(info.level)
+ from .ui_root import Root
+ add_aliases_for_level(Root)
+
+ def fixup_topics():
+ "fix entries for topics and overview"
+ _TOPICS["Overview"] = help_overview()
+ _TOPICS["Topics"] = help_topics()
+
+ try:
+ name = os.getenv("CRM_HELP_FILE") or HELP_FILE
+ helpfile = open(name, 'r')
+ entry = None
+ for line in helpfile:
+ if line.startswith('[['):
+ if entry is not None:
+ process(entry)
+ entry = parse_header(line)
+ elif entry is not None and line.startswith('===') and entry['long']:
+ process(entry)
+ entry = None
+ elif entry is not None:
+ entry['long'] += filter_line(line)
+ if entry is not None:
+ process(entry)
+ helpfile.close()
+ append_cmdinfos()
+ fixup_root_commands()
+ fixup_help_aliases()
+ fixup_topics()
+ except IOError as msg:
+ logger.error("Help text not found! %s", msg)
+
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/history.py b/crmsh/history.py
new file mode 100644
index 0000000..94a3b73
--- /dev/null
+++ b/crmsh/history.py
@@ -0,0 +1,1056 @@
+# Copyright (C) 2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013-2016 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import os
+import time
+import re
+import glob
+import configparser
+
+from . import config
+from . import userdir
+from . import logtime
+from . import logparser
+from . import utils
+from . import log
+from .sh import ShellUtils
+from crmsh.report import core
+
+
+logger = log.setup_logger(__name__)
+
+
+_LOG_FILES = ("ha-log.txt", "messages", "ha-log", "cluster-log.txt", "journal.log", "pacemaker.log")
+
+
+#
+# crm report interface
+#
+# read crm report generated report, show interesting stuff, search
+# through logs, get PE input files, get log slices (perhaps even
+# coloured nicely!)
+#
+
+
+def is_our_log(s, node_l):
+ return logtime.syslog2node(s) in node_l
+
+
+def log2node(log):
+ return os.path.basename(os.path.dirname(log))
+
+
+def is_log(p):
+ return os.path.isfile(p) and os.path.getsize(p) > 0
+
+
+_PE_NUM_RE = re.compile("pe-[^-]+-([0-9]+)[.]")
+
+
+def get_pe_num(pe_file):
+ m = _PE_NUM_RE.search(pe_file)
+ if m:
+ return m.group(1)
+ return "-1"
+
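+# Example (illustrative): get_pe_num("pe-input-523.bz2") == "523".
+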
+
+def pe_file_in_range(pe_f, a):
+ if not a:
+ return pe_f
+ if a[0] <= int(get_pe_num(pe_f)) <= a[1]:
+ return pe_f
+ return None
+
+
+def read_log_info(log):
+ 'Read <log>.info and return logfile and next pos'
+ s = utils.file2str(log + ".info")
+ m = re.match(r"^(.+)\s+(\d+)$", s or '')
+ if m:
+ logf, pos = m.groups()
+ return logf, int(pos)
+ return '', -1
+
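+# The <log>.info companion file holds "<logfile> <next-pos>" on one line,
+# e.g. "/var/log/pacemaker.log 10240" (illustrative values).
+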
+
+def append_newlogs(outdir, to_update):
+ '''
+ Append new logs fetched from nodes.
+ Update <log>.info with new next pos
+ '''
+ if not os.path.isdir(outdir):
+ return
+ for node, rptlog, logfile, nextpos in to_update:
+ fl = glob.glob("%s/*%s*" % (outdir, node))
+ if not fl:
+ continue
+ utils.append_file(rptlog, fl[0])
+
+ newpos = nextpos + os.stat(fl[0]).st_size
+ try:
+ f = open(rptlog + ".info", "w")
+ f.write("%s %d\n" % (logfile, newpos))
+ f.close()
+ except IOError as msg:
+ logger.error("couldn't the update %s.info: %s", rptlog, msg)
+
+
+def rpt_pe2t_str(rpt_pe_file):
+ '''Convert report's pe_file path to transition string.'''
+ node = os.path.basename(os.path.dirname(os.path.dirname(rpt_pe_file)))
+ return logparser.trans_str(node, rpt_pe_file)
+
+
+def mkarchive(idir):
+ "Create an archive from a directory"
+ home = userdir.gethomedir()
+ if not home:
+ logger.error("no home directory, nowhere to pack report")
+ return False
+ _, ext = core.pick_first_compress()
+ if not ext:
+ return False
+ name = os.path.join(home, os.path.basename(idir))
+ archive = f'{name}.tar{ext}'
+ cmd = "tar -C '%s/..' -cj -f '%s' %s" % \
+ (idir, archive, os.path.basename(idir))
+ if utils.pipe_cmd_nosudo(cmd) != 0:
+ logger.error('could not pack report, command "%s" failed', cmd)
+ return False
+ else:
+ print("Report saved in '%s'" % archive)
+ return True
+
+
+CH_SRC, CH_TIME, CH_UPD = 1, 2, 3
+
+
+class Report(object):
+ '''
+ A crm report class.
+ '''
+ live_recent = 6*60*60 # recreate live crm report once every 6 hours
+ short_live_recent = 60 # update once a minute
+ nodecolors = ("NORMAL",
+ "GREEN",
+ "CYAN",
+ "MAGENTA",
+ "YELLOW",
+ "WHITE",
+ "BLUE",
+ "RED")
+ session_sub = "session"
+ report_cache_dir = os.path.join(config.path.cache, 'history-%s' % (utils.getuser()))
+ outdir = os.path.join(report_cache_dir, "psshout")
+ errdir = os.path.join(report_cache_dir, "pssherr")
+
+ def __init__(self):
+ # main source attributes
+ self._creation_time = "--:--:--"
+ self._creator = "unknown"
+ self.source = None
+ self.from_dt = None
+ self.to_dt = None
+ self.log_l = []
+ self.setnodes = [] # optional
+ # derived
+ self.loc = None
+ self.ready = False
+ self.nodecolor = {}
+ self.logparser = None
+ self.desc = None
+ self.cib = None
+ self.node_l = []
+ self.last_live_update = 0
+ self.detail = 0
+ self.log_filter_out = []
+ self.log_filter_out_re = []
+ # change_origin may be 0, CH_SRC, CH_TIME, CH_UPD
+ # depending on the change_origin, we update our attributes
+ self.change_origin = CH_SRC
+ logtime.set_year()
+
+ def error(self, s):
+ logger.error("%s: %s", self.source, s)
+
+ def warn(self, s):
+ logger.warning("%s: %s", self.source, s)
+
+ def rsc_list(self):
+ return self.cib.resources()
+
+ def node_list(self):
+ return self.node_l
+
+ def peinputs_list(self):
+ if self.logparser:
+ return [x.pe_num for x in self.logparser.get_transitions()]
+ return []
+
+ def session_subcmd_list(self):
+ return ["save", "load", "pack", "delete", "list", "update"]
+
+ def session_list(self):
+ d = self.get_session_dir(None)
+ return sorted(os.listdir(d)) if os.path.isdir(d) else []
+
+ def unpack_report(self, tarball):
+ '''
+ Unpack crm report tarball.
+ Don't unpack if the directory already exists!
+ '''
+ bfname = os.path.basename(tarball)
+ parentdir = os.path.dirname(tarball)
+ logger.debug("tarball: %s, in dir: %s", bfname, parentdir)
+ if bfname.endswith(".tar.bz2"):
+ loc = tarball.replace(".tar.bz2", "")
+ tar_unpack_option = "j"
+ elif bfname.endswith(".tar.gz"): # hmm, must be ancient
+ loc = tarball.replace(".tar.gz", "")
+ tar_unpack_option = "z"
+ elif bfname.endswith(".tar.xz"):
+ loc = tarball.replace(".tar.xz", "")
+ tar_unpack_option = "J"
+ else:
+ self.error("this doesn't look like a report tarball")
+ return None
+ self.set_change_origin(CH_SRC)
+ if os.path.isdir(loc):
+ if (os.stat(tarball).st_mtime - os.stat(loc).st_mtime) < 60:
+ return loc
+ utils.rmdir_r(loc)
+ cwd = os.getcwd()
+ if parentdir:
+ try:
+ os.chdir(parentdir)
+ except OSError as msg:
+ self.error(msg)
+ return None
+ try:
+ rc, tf_loc = ShellUtils().get_stdout("tar -t%s < %s 2> /dev/null | head -1" % (tar_unpack_option, utils.quote(bfname)))
+ if os.path.abspath(tf_loc) != os.path.abspath(loc):
+ logger.debug("top directory in tarball: %s, doesn't match the tarball name: %s", tf_loc, loc)
+ loc = os.path.join(os.path.dirname(loc), tf_loc)
+ except Exception as msg:
+ logger.error("%s: %s", tarball, msg)
+ return None
+ logger.debug("tar -x%s < %s", tar_unpack_option, utils.quote(bfname))
+ rc = utils.pipe_cmd_nosudo("tar -x%s < %s" % (tar_unpack_option, utils.quote(bfname)))
+ if self.source == "live":
+ os.remove(bfname)
+ os.chdir(cwd)
+ if rc != 0:
+ return None
+ return loc
+
+ def short_pe_path(self, pe_file):
+ return pe_file.replace("%s/" % self.loc, "")
+
+ def get_nodes(self):
+ def check_node(p):
+ pp = os.path.join(self.loc, p)
+ if os.path.isfile(os.path.join(pp, 'cib.xml')):
+ return p
+ return os.path.isdir(pp) and self.find_node_log(p)
+ nodes = sorted([os.path.basename(p)
+ for p in os.listdir(self.loc)
+ if check_node(p)])
+ if self.source == "live" and len(nodes) == 0:
+ nodes = [utils.this_node()]
+ return nodes
+
+ def check_nodes(self):
+ 'Verify if the nodes in cib match the nodes in the report.'
+ nl = self.get_nodes()
+ if not nl:
+ self.error("no nodes in report")
+ return False
+ for n in self.node_l:
+ if n not in nl:
+ self.warn("node %s not in report" % n)
+ else:
+ nl.remove(n)
+ return True
+
+ def check_report(self):
+ '''
+ Check some basic properties of the report.
+ '''
+ if not self.loc:
+ return False
+ if not os.access(self.desc, os.F_OK):
+ self.error("no description file in the report")
+ return False
+ if not self.check_nodes():
+ return False
+ return True
+
+ def _live_loc(self):
+ return os.path.join(self.report_cache_dir, "live")
+
+ def is_live_recent(self):
+ '''
+ Look at the last live report. If it's recent enough,
+ return True.
+ '''
+ try:
+ last_ts = os.stat(self.desc).st_mtime
+ return time.time() - last_ts <= self.live_recent
+ except (OSError, TypeError):
+ return False
+
+ def is_live_very_recent(self):
+ '''
+ Look at the last live report. If it's recent enough,
+ return True.
+ '''
+ return (time.time() - self.last_live_update) <= self.short_live_recent
+
+ def prevent_live_update(self):
+ '''
+ Don't update live report if to_time is set (not open end).
+ '''
+ return self.to_dt is not None
+
+ def find_node_log(self, node):
+ p = os.path.join(self.loc, node)
+ for lf in _LOG_FILES:
+ if is_log(os.path.join(p, lf)):
+ return os.path.join(p, lf)
+ return None
+
+ def find_logs(self):
+ 'Return a list of logs found (one per node).'
+ l = []
+ for node in self.node_l:
+ log = self.find_node_log(node)
+ if log:
+ l.append(log)
+ else:
+ self.warn("no log found for node %s" % node)
+ if self.source == "live" and node == utils.this_node():
+ self.warn("Data collection fails if '%s' is not in sudoers file" % (utils.getuser()))
+ if len(l) == 0:
+ for lf in _LOG_FILES:
+ global_log = os.path.join(self.loc, lf)
+ if os.path.isfile(global_log):
+ l.append(global_log)
+ break
+ return l
+
+ def unpack_new_peinputs(self, node, pe_l):
+ '''
+ Untar PE inputs fetched from nodes.
+ '''
+ if not os.path.isdir(self.outdir):
+ return -1  # keep the caller's bitwise accumulation of return codes working
+ fl = glob.glob("%s/*%s*" % (self.outdir, node))
+ if not fl:
+ return -1
+ u_dir = os.path.join(self.loc, node)
+ return utils.pipe_cmd_nosudo("tar -C %s -x < %s" % (u_dir, fl[0]))
+
+ def read_new_log(self, node):
+ '''
+ Get a list of log lines.
+ The log is put in self.outdir/node by parallax.
+ '''
+ if not os.path.isdir(self.outdir):
+ return []
+ fl = glob.glob("%s/*%s*" % (self.outdir, node))
+ if not fl:
+ return []
+ try:
+ f = open(fl[0])
+ except IOError as msg:
+ logger.error("open %s: %s", fl[0], msg)
+ return []
+ return f.readlines()
+
+ def update_live_report(self, next_loglines, next_peinputs):
+ '''
+ Update the existing live report, if it's older than
+ self.short_live_recent:
+ - append newer logs
+ - get new PE inputs
+ TODO: FIXME: broken now
+ '''
+ logger.info("Fetching updated logs from cluster nodes. Please wait...")
+ logger.debug("Candidate logs: %s", self.log_l)
+ to_update = []
+ for rptlog in self.log_l:
+ node = log2node(rptlog)
+ logf, pos = read_log_info(rptlog)
+ if logf:
+ logger.debug("Updating %s : %s : %s : %s", node, rptlog, logf, pos)
+ to_update.append([node, rptlog, logf, pos])
+ if not to_update:
+ logger.info("No updatable logs found (missing .info for logs)")
+ return False
+
+ utils.rmdir_r(self.outdir)
+ utils.rmdir_r(self.errdir)
+ self.last_live_update = time.time()
+
+ end_time = self._str_dt(self.get_rpt_dt(self.to_dt, "bottom"))
+ rc1 = next_loglines(to_update, self.outdir, self.errdir, end_time)
+ append_newlogs(self.outdir, to_update)
+
+ # read new logs
+ # find any missing pefiles
+ # return list of missing pefiles
+ # fetch missing pefiles from nodes
+ # unpack missing pefiles
+ # node_pe_l: [(node, [pefile ...]) ...]
+ node_pe_l = self.logparser.scan(mode='refresh')
+ rc2 = next_peinputs(node_pe_l, self.outdir, self.errdir)
+ unpack_rc = 0
+ for node, pe_l in node_pe_l:
+ unpack_rc |= self.unpack_new_peinputs(node, pe_l)
+ rc2 |= (unpack_rc == 0)
+ utils.rmdir_r(self.outdir)
+ utils.rmdir_r(self.errdir)
+
+ return rc1 and rc2
+
+ def get_live_report(self):
+ loc = None
+ with utils.lock(self.report_cache_dir):
+ loc = self.new_live_report()
+ return loc
+
+ def manage_live_report(self, force=False, no_live_update=False):
+ '''
+ Update or create live report.
+ '''
+ d = self._live_loc()
+
+ created_now = False
+
+ # Create live report if it doesn't exist
+ if not d or not os.path.isdir(d):
+ created_now, d = True, self.get_live_report()
+ if not self.loc:
+ # the live report is there, but we were just invoked
+ self.loc = d
+ self.report_setup()
+ if not force and self.is_live_recent():
+ # try just to refresh the live report
+ if self.to_dt or self.is_live_very_recent() or no_live_update:
+ return self._live_loc()
+ _HAS_PARALLAX = False
+ try:
+ from .crm_pssh import next_loglines, next_peinputs
+ _HAS_PARALLAX = True
+ except ImportError:
+ pass
+ if _HAS_PARALLAX:
+ rc = None
+ with utils.lock(self.report_cache_dir):
+ rc = self.update_live_report(next_loglines, next_peinputs)
+ if rc is None:
+ return None
+ if rc:
+ self.set_change_origin(CH_UPD)
+ return self._live_loc()
+ else:
+ logger.warning("parallax library not installed, slow live updates ahead")
+ if not created_now:
+ return self.get_live_report()
+ return self.loc
+
+ def new_live_report(self):
+ '''
+ Run the report command to get logs now.
+ '''
+ extcmd = "crm report"
+
+ d = self._live_loc()
+ if not utils.is_path_sane(d):
+ return None
+ utils.rmdir_r(d)
+ _, ext = core.pick_first_compress()
+ if not ext:
+ return None
+ tarball = f"{d}.tar{ext}"
+ to_option = ""
+ if self.to_dt:
+ to_option = "-t '%s'" % logtime.human_date(self.to_dt)
+ nodes_option = ""
+ if self.setnodes:
+ nodes_option = "'-n %s'" % ' '.join(self.setnodes)
+ utils.mkdirp(os.path.dirname(d))
+ logger.info("Retrieving information from cluster nodes, please wait...")
+ cmd = "{} {} -Z -Q -f '{}' {} {} {} {}".format(extcmd,
+ "-v" if config.core.debug else "", self.from_dt.ctime(),
+ to_option, nodes_option, str(config.core.report_tool_options), d)
+ logger.debug("Running command: {}".format(cmd))
+ rc, out, err = ShellUtils().get_stdout_stderr(cmd)
+ if rc != 0:
+ if err:
+ print(err)
+ if os.path.isfile(tarball):
+ self.warn("report thinks it failed, proceeding anyway")
+ else:
+ self.error("report failed")
+ return None
+ if out:
+ print(out)
+ self.last_live_update = time.time()
+ return self.unpack_report(tarball)
+
+ def set_source(self, src):
+ 'Set our source.'
+ if self.source != src:
+ self.set_change_origin(CH_SRC)
+ self.source = src
+ self.loc = None
+ self.ready = False
+
+ def set_period(self, from_dt, to_dt):
+ '''
+ Set from/to_dt.
+ '''
+ logger.debug("setting report times: <%s> - <%s>", from_dt, to_dt)
+ self.from_dt = from_dt
+ self.to_dt = to_dt
+
+ refresh = False
+ if self.source == "live" and self.ready:
+ top_dt = self.get_rpt_dt(None, "top")
+ if top_dt is None:
+ return False
+ refresh = from_dt and top_dt > from_dt
+ if refresh:
+ self.set_change_origin(CH_UPD)
+ return self.refresh_source(force=True)
+ else:
+ self.set_change_origin(CH_TIME)
+ self.report_setup()
+ return True
+
+ def set_detail(self, detail_lvl):
+ '''
+ Set the detail level.
+ '''
+ self.detail = int(detail_lvl)
+ if self.logparser:
+ self.logparser.detail = self.detail
+
+ def set_nodes(self, *args):
+ '''
+ Allow user to set the node list (necessary if the host is
+ not part of the cluster).
+ '''
+ self.setnodes = args
+
+ def get_cib_loc(self):
+ if not self.node_l:
+ return ""
+ return os.path.join(self.loc, self.node_l[0], "cib.xml")
+
+ def read_cib(self):
+ '''
+ Get some information from the report's CIB (node list,
+ resource list, groups). If "live" then use cibadmin.
+ '''
+ self.cib = logparser.CibInfo(self.loc)
+
+ def set_node_colors(self):
+ i = 0
+ for n in self.node_l:
+ self.nodecolor[n] = self.nodecolors[i]
+ i = (i+1) % len(self.nodecolors)
+
+ def _report_setup_source(self):
+ # is this an crm report or a crm_report?
+ for descname in ("description.txt", "report.summary"):
+ self.desc = os.path.join(self.loc, descname)
+ if os.path.isfile(self.desc):
+ yr = os.stat(self.desc).st_mtime
+ logger.debug("Found %s, created %s", descname, yr)
+ self._creation_time = time.strftime("%a %d %b %H:%M:%S %Z %Y",
+ time.localtime(yr))
+ if descname == 'report.summary':
+ self._creator = "crm_report"
+ else:
+ self._creator = 'unknown'
+ logtime.set_year(yr)
+ break
+ else:
+ self.error("Invalid report: No description found")
+ return
+
+ self.node_l = self.get_nodes()
+ self.set_node_colors()
+ self.log_l = self.find_logs()
+ self.read_cib()
+
+ def _report_setup_update(self):
+ l = self.get_nodes()
+ if self.node_l != l:
+ self.node_l = l
+ self.set_node_colors()
+ self.log_l = self.find_logs()
+ self.read_cib()
+
+ def report_setup(self):
+ if self.change_origin == 0:
+ return False
+ if not self.loc:
+ return False
+
+ if self.change_origin == CH_SRC:
+ self._report_setup_source()
+ elif self.change_origin == CH_UPD:
+ self._report_setup_update()
+
+ if self.logparser is None:
+ self.logparser = logparser.LogParser(self.loc, self.cib, self.log_l, self.detail)
+ self.logparser.scan()
+ self.logparser.set_timeframe(self.from_dt, self.to_dt)
+
+ self.ready = self.check_report()
+ self.set_change_origin(0)
+
+ def prepare_source(self, no_live_update=False):
+ '''
+ Unpack a report tarball.
+ For "live", create an ad-hoc report and unpack it
+ somewhere in the cache area.
+ Parse the period.
+ '''
+ if not self.source:
+ logger.error("no source set yet")
+ return False
+ if self.ready and (no_live_update or self.source != "live"):
+ return True
+ if self.source == "live":
+ self.loc = self.manage_live_report(no_live_update=no_live_update)
+ elif os.path.isfile(self.source):
+ self.loc = self.unpack_report(self.source)
+ elif os.path.isdir(self.source):
+ self.loc = self.source
+ if not self.loc:
+ return False
+ self.report_setup()
+ return self.ready
+
+ def refresh_source(self, force=False):
+ '''
+ Refresh report from live,
+ or clear metadata cache for non-live report
+ '''
+ if self.source == "live":
+ self.last_live_update = 0
+ self.loc = self.manage_live_report(force=force)
+ self.report_setup()
+ return self.ready
+ else:
+ print("Refreshing log data...")
+ if not self.ready:
+ self.set_change_origin(CH_TIME)
+ self.prepare_source()
+ missing_pes = self.logparser.scan(mode='force')
+ if len(missing_pes):
+ print("%d transitions, %d events and %d missing PE input files." % tuple(self.logparser.count() + (len(missing_pes),)))
+ else:
+ print("%d transitions, %d events." % self.logparser.count())
+
+ def _str_nodecolor(self, node, s):
+ try:
+ clr = self.nodecolor[node]
+ except:
+ return s
+ try:
+ return "${%s}%s${NORMAL}" % (clr, s)
+ except:
+ s = s.replace("${", "$.{")
+ return "${%s}%s${NORMAL}" % (clr, s)
+
+ def match_filter_out(self, s):
+ for regexp in self.log_filter_out_re:
+ if regexp.search(s):
+ return True
+ return False
+
+ def display_logs(self, l):
+ def color_nodes(s):
+ node = logtime.syslog2node(s)
+ return self._str_nodecolor(node, s) if node is not None else s
+
+ if self.log_filter_out_re:
+ utils.page_gen(color_nodes(x) for x in l if not self.match_filter_out(x))
+ else:
+ utils.page_gen(color_nodes(x) for x in l)
+
+ def show_logs(self, nodes=None):
+ '''
+ Print log lines, either all or matching a given node
+ '''
+ self.display_logs(self.logparser.get_logs(nodes=nodes))
+
+ def get_source(self):
+ return self.source
+
+ def get_desc_line(self, fld):
+ try:
+ f = open(self.desc)
+ except IOError as msg:
+ logger.error("open %s: %s", self.desc, msg)
+ return
+ for s in f:
+ if s.startswith("%s: " % fld):
+ f.close()
+ s = s.replace("%s: " % fld, "").rstrip()
+ return s
+ f.close()
+
+ def short_peinputs_list(self):
+ '''There could be quite a few transitions, limit the
+ output'''
+ max_output = 20
+ s = ""
+ transitions = list(self.logparser.get_transitions())
+ if len(transitions) > max_output:
+ s = "... "
+
+ def fmt(t):
+ if len(t.tags):
+ return self._str_nodecolor(t.dc, t.pe_num) + "*"
+ return self._str_nodecolor(t.dc, t.pe_num)
+
+ return "%s%s" % (s, ' '.join([fmt(x) for x in transitions[-max_output:]]))
+
+ def get_rpt_dt(self, dt, whence):
+ '''
+ Figure out the time of the start/end of the report.
+ The dt input is the timestamp set by the user (it can be
+ empty). whence is set either to "top" or "bottom".
+ '''
+ def first_line(l):
+ l.seek(0)
+ return utils.to_ascii(l.readline()).rstrip()
+
+ def last_line(l):
+ '''Note: assumes that the last log line isn't > 2048 characters'''
+ l.seek(-2048, os.SEEK_END)
+ return utils.to_ascii(l.readlines()[-1]).rstrip()
+
+ if dt:
+ return dt
+ try:
+ if whence == "top":
+ myts = min(logtime.syslog_ts(x) for x in (first_line(l) for l in self.logparser.fileobjs))
+ elif whence == "bottom":
+ myts = max(logtime.syslog_ts(x) for x in (last_line(l) for l in self.logparser.fileobjs))
+ if myts:
+ return utils.timestamp_to_datetime(myts)
+ logger.debug("No log lines with timestamps found in report")
+ except Exception as e:
+ logger.debug("Error: %s", e)
+ return None
+
+ def _str_dt(self, dt):
+ return dt and logtime.human_date(dt) or "--/--/-- --:--:--"
+
+ def info(self):
+ '''
+ Print information about the source.
+ '''
+ if not self.prepare_source():
+ return False
+
+ created_on = self.get_desc_line("Date") or self._creation_time
+ created_by = self.get_desc_line("By") or self._creator
+
+ utils.page_string(
+ '\n'.join(("Source: %s" % self.source,
+ "Created on: %s" % (created_on),
+ "By: %s" % (created_by),
+ "Period: %s - %s" %
+ (self._str_dt(self.get_rpt_dt(self.from_dt, "top")),
+ self._str_dt(self.get_rpt_dt(self.to_dt, "bottom"))),
+ "Nodes: %s" % ' '.join([self._str_nodecolor(x, x)
+ for x in self.node_l]),
+ "Groups: %s" % ' '.join(list(self.cib.groups.keys())),
+ "Clones: %s" % ' '.join(list(self.cib.clones.keys())),
+ "Resources: %s" % ' '.join(self.cib.primitives),
+ "Transitions: %s" % self.short_peinputs_list())))
+
+ def events(self):
+ '''
+ Show all events.
+ '''
+ if not self.prepare_source():
+ return False
+
+ self.display_logs(self.logparser.get_events())
+
+ def find_transition(self, t_str):
+ for t_obj in self.logparser.get_transitions():
+ if str(t_obj) == t_str:
+ return t_obj
+ return None
+
+ def show_transition_log(self, rpt_pe_file, full_log=False):
+ '''
+ Search for events within the given transition.
+ '''
+ if not self.prepare_source(no_live_update=self.prevent_live_update()):
+ return False
+ t_obj = self.find_transition(rpt_pe2t_str(rpt_pe_file))
+ if not t_obj:
+ logger.error("%s: transition not found", rpt_pe_file)
+ return False
+ # limit the log scope temporarily
+ self.logparser.set_timeframe(t_obj.start_ts, t_obj.end_ts)
+ if full_log:
+ self.show_logs()
+ else:
+ t_obj.transition_info()
+ self.events()
+ self.logparser.set_timeframe(self.from_dt, self.to_dt)
+ return True
+
+ def get_transition_tags(self, rpt_pe_file):
+ '''
+ Returns the tags for the transition as a sorted list
+ '''
+ t_obj = self.find_transition(rpt_pe2t_str(rpt_pe_file))
+ if not t_obj:
+ logger.error("%s: transition not found", rpt_pe_file)
+ return None
+ return sorted(t_obj.tags)
+
+ def resource(self, *args):
+ '''
+ Show resource events.
+ '''
+ if not self.prepare_source(no_live_update=self.prevent_live_update()):
+ return False
+ self.display_logs(self.logparser.get_events(event="resource", resources=args))
+
+ def node(self, *args):
+ '''
+ Show node events.
+ '''
+ if not self.prepare_source(no_live_update=self.prevent_live_update()):
+ return False
+ self.display_logs(self.logparser.get_events(event="node", nodes=args))
+
+ def show_log(self, *nodes):
+ '''
+ Show logs for a node or all nodes.
+ '''
+ if not self.prepare_source():
+ return False
+ self.show_logs(nodes=nodes)
+
+ def pe_detail_format(self, t_obj):
+ l = [
+ utils.shortdate(t_obj.start_ts),
+ utils.shorttime(t_obj.start_ts),
+ t_obj.end_ts and utils.shorttime(t_obj.end_ts) or "--:--:--",
+ # the format string occurs also below
+ self._str_nodecolor(t_obj.dc, '%-13s' % t_obj.shortname())
+ ]
+ l += utils.get_cib_attributes(t_obj.path(), "cib",
+ ("update-client", "update-user", "update-origin"),
+ ("no-client", "no-user", "no-origin"))
+ l += [" ".join(sorted(t_obj.tags))]
+ return '%s %s %s %-13s %-10s %-10s %s %s' % tuple(l)
+
+ def pelist(self, a=None, verbose=False):
+ pe_details_hdr = "Date Start End Filename Client User Origin Tags"
+ pe_details_sep = "==== ===== === ======== ====== ==== ====== ===="
+ if not self.prepare_source(no_live_update=self.prevent_live_update()):
+ return []
+ if isinstance(a, (tuple, list)):
+ if len(a) == 1:
+ a.append(a[0])
+ elif a is not None:
+ a = [a, a]
+ l = [verbose and self.pe_detail_format(t_obj) or t_obj.path()
+ for t_obj in self.logparser.get_transitions() if pe_file_in_range(t_obj.pe_file, a)]
+ if verbose:
+ l = [pe_details_hdr, pe_details_sep] + l
+ return l
+
+ def show_transitions(self):
+ if not self.prepare_source(no_live_update=self.prevent_live_update()):
+ return []
+ return ["%-30s %-15s %-15s Tags" % ("Time", "Name", "Node")] + [t.description() for t in self.logparser.get_transitions()]
+
+ def dotlist(self, a=None):
+ l = [x.replace("bz2", "dot") for x in self.pelist(a)]
+ return [x for x in l if os.path.isfile(x)]
+
+ def find_pe_files(self, path):
+ 'Find a PE or dot file matching part of the path.'
+ pe_l = path.endswith(".dot") and self.dotlist() or self.pelist()
+ return [x for x in pe_l if x.find(path) >= 0]
+
+ def pe2dot(self, f):
+ f = f.replace("bz2", "dot")
+ if os.path.isfile(f):
+ return f
+ return None
+
+ def find_file(self, f):
+ return utils.file_find_by_name(self.loc, f)
+
+ def get_session_dir(self, name):
+ # name=None refers to the sessions root directory
+ if name is None:
+ return os.path.join(self.report_cache_dir, self.session_sub)
+ return os.path.join(self.report_cache_dir, self.session_sub, name)
+ state_file = 'history_state.cfg'
+ rpt_section = 'report'
+
+ def save_state(self, sdir):
+ '''
+ Save the current history state. It should include:
+ - directory
+ - timeframe
+ - detail
+ TODO
+ '''
+ p = configparser.ConfigParser()
+ p.add_section(self.rpt_section)
+ p.set(self.rpt_section, 'dir',
+ self.source == "live" and sdir or self.source)
+ p.set(self.rpt_section, 'from_time',
+ self.from_dt and logtime.human_date(self.from_dt) or '')
+ p.set(self.rpt_section, 'to_time',
+ self.to_dt and logtime.human_date(self.to_dt) or '')
+ p.set(self.rpt_section, 'detail', str(self.detail))
+ self.manage_excludes("save", p)
+ fname = os.path.join(sdir, self.state_file)
+ try:
+ with open(fname, "wt") as f:
+ p.write(f)
+ except IOError as msg:
+ logger.error("Failed to save state: %s", msg)
+ return False
+ return True
+
+ def load_state(self, sdir):
+ '''
+ Load the history state from a file.
+ '''
+ p = configparser.ConfigParser()
+ fname = os.path.join(sdir, self.state_file)
+ try:
+ p.read(fname)
+ except Exception as msg:
+ logger.error("Failed to load state: %s", msg)
+ return False
+ rc = True
+ try:
+ for n, v in p.items(self.rpt_section):
+ if n == 'dir':
+ self.set_source(v)
+ if not os.path.exists(v):
+ logger.error("session state file %s points to a non-existing directory: %s", fname, v)
+ rc = False
+ elif n == 'from_time':
+ self.from_dt = v and utils.parse_time(v) or None
+ elif n == 'to_time':
+ self.to_dt = v and utils.parse_time(v) or None
+ elif n == 'detail':
+ self.set_detail(v)
+ else:
+ logger.warning("unknown item %s in the session state file %s", n, fname)
+ rc &= self.manage_excludes("load", p)
+ except configparser.NoSectionError as msg:
+ logger.error("session state file %s: %s", fname, msg)
+ rc = False
+ except Exception as msg:
+ logger.error("%s: bad value '%s' for '%s' in session state file %s", msg, v, n, fname)
+ rc = False
+ if rc:
+ self.set_change_origin(CH_SRC)
+ return rc
+
+ def set_change_origin(self, org):
+ '''Set the origin, but only to a smaller value (if current > 0).
+ This prevents a greater change_origin from overwriting a
+ lesser one.
+ '''
+ if self.change_origin == 0 or org < self.change_origin:
+ self.change_origin = org
+
+ def manage_session(self, subcmd, name):
+ session_dir = self.get_session_dir(name)
+ if not utils.is_path_sane(session_dir):
+ return False
+ if subcmd == "save" and os.path.exists(session_dir):
+ logger.error("history session %s exists", name)
+ return False
+ elif subcmd in ("load", "pack", "update", "delete") and not os.path.exists(session_dir):
+ logger.error("history session %s does not exist", name)
+ return False
+ if subcmd == "save":
+ utils.mkdirp(session_dir)
+ if self.source == "live":
+ rc = utils.pipe_cmd_nosudo("tar -C '%s' -c . | tar -C '%s' -x" %
+ (self._live_loc(), session_dir))
+ if rc != 0:
+ return False
+ return self.save_state(session_dir)
+ elif subcmd == "update":
+ return self.save_state(session_dir)
+ elif subcmd == "load":
+ return self.load_state(session_dir)
+ elif subcmd == "delete":
+ utils.rmdir_r(session_dir)
+ elif subcmd == "list":
+ for l in self.session_list():
+ print(l)
+ elif subcmd == "pack":
+ return mkarchive(session_dir)
+ return True
+ log_section = 'log'
+
+ def manage_excludes(self, cmd, arg=None):
+ '''Exclude messages from log files.
+ arg: None (show, clear)
+ regex (add)
+ instance of ConfigParser.ConfigParser (load, save)
+ '''
+ if not self.prepare_source(no_live_update=True):
+ return False
+ rc = True
+ if cmd == "show":
+ print('\n'.join(self.log_filter_out))
+ elif cmd == "clear":
+ self.log_filter_out = []
+ self.log_filter_out_re = []
+ elif cmd == "add":
+ try:
+ regex = re.compile(arg)
+ self.log_filter_out.append(arg)
+ self.log_filter_out_re.append(regex)
+ except Exception as msg:
+ logger.error("bad regex %s: %s", arg, msg)
+ rc = False
+ elif cmd == "save" and self.log_filter_out:
+ arg.add_section(self.log_section)
+ for i, rgx in enumerate(self.log_filter_out):
+ arg.set(self.log_section, 'exclude_%d' % i, rgx)
+ elif cmd == "load":
+ self.manage_excludes("clear")
+ try:
+ for n, v in arg.items(self.log_section):
+ if n.startswith('exclude_'):
+ rc &= self.manage_excludes("add", v)
+ else:
+ logger.warning("unknown item %s in the section %s", n, self.log_section)
+ except configparser.NoSectionError:
+ pass
+ return rc
+
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/idmgmt.py b/crmsh/idmgmt.py
new file mode 100644
index 0000000..e0f2747
--- /dev/null
+++ b/crmsh/idmgmt.py
@@ -0,0 +1,193 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+#
+# Make sure that ids are unique.
+
+import re
+import copy
+from . import constants
+from . import xmlutil
+from . import log
+
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+_id_store = {}
+_state = []
+ok = True # module-level error flag, set by check_node()/check_xml()
+
+
+def push_state():
+ _state.append(copy.deepcopy(_id_store))
+
+
+def pop_state():
+ try:
+ global _id_store
+ _id_store = _state.pop()
+ return True
+ except IndexError:
+ return False
+
+
+def drop_state():
+ try:
+ _state.pop()
+ except IndexError:
+ pass
+
+
+def clean_state():
+ global _state
+ _state = []
+
+
+def new(node, pfx):
+ '''
+ Create a unique id for the xml node.
+ '''
+ if re.search(r'^\d+$', pfx) and node.tag != "node":
+ pfx = "num-{}".format(pfx)
+ name = node.get("name")
+ if node.tag == "nvpair":
+ node_id = "%s-%s" % (pfx, name)
+ elif node.tag == "op":
+ interval = node.get("interval")
+ if interval:
+ node_id = "%s-%s-%s" % (pfx, name, interval)
+ else:
+ node_id = "%s-%s" % (pfx, name)
+ else:
+ subpfx = constants.subpfx_list.get(node.tag, '')
+ if subpfx:
+ node_id = "%s-%s" % (pfx, subpfx)
+ else:
+ node_id = pfx
+ if is_used(node_id):
+ node_id = _gen_free_id(node_id)
+ save(node_id)
+ return node_id
+
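+# A hypothetical illustration of the scheme above (not part of this
+# module): for an <nvpair name="target-role"/> under prefix "r0-meta",
+# new() yields "r0-meta-target-role"; if that id is already taken,
+# _gen_free_id falls back to "r0-meta-target-role-0", "-1", and so on.
+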
+
+def _gen_free_id(node_id):
+ "generate a unique id"
+ # shouldn't really get here
+ for cnt in range(99):
+ try_id = "%s-%d" % (node_id, cnt)
+ if not is_used(try_id):
+ node_id = try_id
+ break
+ return node_id
+
+
+def check_node(node, lvl):
+ global ok
+ node_id = node.get("id")
+ if not node_id:
+ return
+ if id_in_use(node_id):
+ logger.error("id_store: id %s is in use", node_id)
+ ok = False
+ return
+
+
+def _store_node(node, lvl):
+ save(node.get("id"))
+
+
+def _drop_node(node, lvl):
+ remove(node.get("id"))
+
+
+def check_xml(node):
+ global ok
+ ok = True
+ xmlutil.xmltraverse_thin(node, check_node)
+ return ok
+
+
+def store_xml(node):
+ if not check_xml(node):
+ return False
+ xmlutil.xmltraverse_thin(node, _store_node)
+ return True
+
+
+def remove_xml(node):
+ xmlutil.xmltraverse_thin(node, _drop_node)
+
+
+def replace_xml(oldnode, newnode):
+ remove_xml(oldnode)
+ if not store_xml(newnode):
+ store_xml(oldnode)
+ return False
+ return True
+
+
+def is_used(node_id):
+ return node_id in _id_store
+
+
+def id_in_use(obj_id):
+ if is_used(obj_id):
+ logger_utils.id_used_err(obj_id)
+ return True
+ return False
+
+
+def save(node_id):
+ if not node_id:
+ return
+ _id_store[node_id] = 1
+
+
+def rename(old_id, new_id):
+ if not old_id or not new_id:
+ return
+ if not is_used(old_id):
+ return
+ if is_used(new_id):
+ return
+ remove(old_id)
+ save(new_id)
+
+
+def remove(node_id):
+ if not node_id:
+ return
+ try:
+ del _id_store[node_id]
+ except KeyError:
+ pass
+
+
+def clear():
+ global _id_store
+ global _state
+ _id_store = {}
+ _state = []
+
+
+def set_id(node, oldnode, id_hint, id_required=True):
+ '''
+ Set the id attribute for the node.
+ - if the node already contains "id", keep it
+ - if the old node contains "id", copy that
+ - if the node contains "uname", copy that
+ - else if required, create a new one using id_hint
+ - save the new id in idmgmt.
+ '''
+ old_id = oldnode.get("id") if oldnode is not None else None
+ new_id = node.get("id") or old_id or node.get("uname")
+ if new_id:
+ save(new_id)
+ elif id_required:
+ new_id = new(node, id_hint)
+ if new_id:
+ node.set("id", new_id)
+ if oldnode is not None and old_id == new_id:
+ xmlutil.set_id_used_attr(oldnode)
+
+
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/lock.py b/crmsh/lock.py
new file mode 100644
index 0000000..3bcdc12
--- /dev/null
+++ b/crmsh/lock.py
@@ -0,0 +1,197 @@
+# Copyright (C) 2020 Xin Liang <XLiang@suse.com>
+# See COPYING for license information.
+
+
+import re
+import time
+from contextlib import contextmanager
+
+from . import sh
+from . import config
+from . import log
+from .sh import ShellUtils
+
+
+logger = log.setup_logger(__name__)
+
+
+class SSHError(Exception):
+ """
+ Custom exception for ssh error
+ """
+
+
+class ClaimLockError(Exception):
+ """
+ Custom exception if claiming lock failed or wait lock release timed out
+ """
+
+
+class Lock(object):
+ """
+ A base class defining a lock mechanism used to exclude other nodes
+ """
+
+ LOCK_DIR_DEFAULT = "/run/.crmsh_lock_directory"
+
+ def __init__(self, lock_dir=None):
+ """
+ Init function
+ """
+ # only the lock owner can unlock
+ self.lock_owner = False
+ self.lock_dir = lock_dir or self.LOCK_DIR_DEFAULT
+
+ def _run(self, cmd):
+ """
+ Run a command locally
+ """
+ return ShellUtils().get_stdout_stderr(cmd)
+
+ def _create_lock_dir(self):
+ """
+ Create the lock directory; mkdir is atomic, so only one caller can succeed
+ """
+ cmd = "mkdir {}".format(self.lock_dir)
+ rc, _, _ = self._run(cmd)
+ if rc == 0:
+ self.lock_owner = True
+ return True
+ return False
+
+ def _lock_or_fail(self):
+ """
+ Raise ClaimLockError if claiming lock failed
+ """
+ if not self._create_lock_dir():
+ raise ClaimLockError("Failed to claim lock (the lock directory exists at {})".format(self.lock_dir))
+
+ def _unlock(self):
+ """
+ Remove the lock directory
+ """
+ if self.lock_owner:
+ cmd = "rm -rf {}".format(self.lock_dir)
+ self._run(cmd)
+
+ @contextmanager
+ def lock(self):
+ """
+ Create the lock directory locally, and remove it when done
+ Might raise ClaimLockError
+ """
+ try:
+ self._lock_or_fail()
+ yield
+ finally:
+ self._unlock()
+
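+# A minimal usage sketch (illustrative only, not part of this module):
+# the context manager claims the lock directory and always removes it,
+# even when the guarded block raises.
+#
+# try:
+#     with Lock().lock():
+#         pass  # critical section runs while the lock directory exists
+# except ClaimLockError as err:
+#     logger.error("%s", err)
+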
+
+class RemoteLock(Lock):
+ """
+ A Lock subclass that claims the lock on a remote node
+ and can wait for the lock to be released
+ """
+
+ SSH_TIMEOUT = 10
+ SSH_OPTION = "-o ConnectTimeout={} -o StrictHostKeyChecking=no".format(SSH_TIMEOUT)
+ SSH_EXIT_ERR = 255
+ MIN_LOCK_TIMEOUT = 120
+ WAIT_INTERVAL = 10
+
+ def __init__(self, remote_node, for_join=True, lock_dir=None, wait=True, no_warn=False):
+ """
+ Init function
+ """
+ self.remote_node = remote_node
+ self.for_join = for_join
+ self.wait = wait
+ self.no_warn = no_warn
+ super().__init__(lock_dir=lock_dir)
+
+ def _run(self, cmd):
+ """
+ Run command on remote node
+ """
+ # TODO: pass SSH_OPTION
+ rc, out, err = sh.cluster_shell().get_rc_stdout_stderr_without_input(self.remote_node, cmd)
+ if rc == self.SSH_EXIT_ERR:
+ raise SSHError(err)
+ return rc, out, err
+
+ @property
+ def lock_timeout(self):
+ """
+ Get lock_timeout from config.core
+ """
+ try:
+ value = int(config.core.lock_timeout)
+ except ValueError:
+ raise ValueError("Invalid format of core.lock_timeout(should be a number)")
+ if value < self.MIN_LOCK_TIMEOUT:
+ raise ValueError("Minimum value of core.lock_timeout should be {}".format(self.MIN_LOCK_TIMEOUT))
+ return value
+
+ def _get_online_nodelist(self):
+ """
+ Get the online node list from remote node
+ """
+ rc, out, err = self._run("crm_node -l")
+ if rc != 0 and err:
+ raise ValueError(err)
+ return re.findall('[0-9]+ (.*) member', out)
+
+ def _lock_or_wait(self):
+ """
+ Try to claim the lock on the remote node, waiting if the claim fails;
+ raise ClaimLockError once lock_timeout is reached
+ """
+ warned_once = False
+ online_list = []
+ pre_online_list = []
+ expired_error_str = "Cannot continue since the lock directory exists at the node ({}:{})".format(self.remote_node, self.lock_dir)
+
+ current_time = int(time.time())
+ timeout = current_time + self.lock_timeout
+ while current_time <= timeout:
+
+ # Try to claim the lock
+ if self._create_lock_dir():
+ # Success
+ break
+
+ if self.for_join:
+ # Restart the wait when the cluster membership changes,
+ # since the claim may be contended again
+ online_list = self._get_online_nodelist()
+ if pre_online_list and pre_online_list != online_list:
+ timeout = current_time + self.lock_timeout
+ pre_online_list = online_list
+
+ if not self.no_warn and not warned_once:
+ warned_once = True
+ logger.warning("Might have unfinished process on other nodes, wait %ss...", self.lock_timeout)
+
+ time.sleep(self.WAIT_INTERVAL)
+ current_time = int(time.time())
+
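+ # while/else: the else branch runs only when the loop ends
+ # without break, i.e. the lock was never claimed in time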
+ else:
+ raise ClaimLockError("Timed out after {} seconds. {}".format(self.lock_timeout, expired_error_str))
+
+ @contextmanager
+ def lock(self):
+ """
+ Create the lock directory on the remote node, and remove it when done
+ Might raise SSHError, ClaimLockError or ValueError
+ """
+ try:
+ if self.wait:
+ self._lock_or_wait()
+ else:
+ self._lock_or_fail()
+ yield
+ finally:
+ self._unlock()
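+
+# A rough usage sketch (illustrative; the node name is made up): claim
+# the lock on a peer before joining, waiting up to core.lock_timeout
+# seconds for any current holder to release it.
+#
+# try:
+#     with RemoteLock("node2", for_join=True).lock():
+#         pass  # run the join steps while the remote lock is held
+# except (ClaimLockError, SSHError) as err:
+#     logger.error("%s", err)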
diff --git a/crmsh/log.py b/crmsh/log.py
new file mode 100644
index 0000000..65c0d7e
--- /dev/null
+++ b/crmsh/log.py
@@ -0,0 +1,574 @@
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+import socket
+import shutil
+import logging
+import logging.config
+import logging.handlers
+import typing
+from contextlib import contextmanager
+
+from . import options
+from . import constants
+
+DEBUG2 = logging.DEBUG + 5
+CRMSH_LOG_FILE = "/var/log/crmsh/crmsh.log"
+
+
+class DEBUG2Logger(logging.Logger):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ def debug2(self, msg, *args, **kwargs):
+ if self.isEnabledFor(DEBUG2):
+ self._log(DEBUG2, msg, args, **kwargs)
+
+
+class NumberedLoggerInterface(DEBUG2Logger):
+ """
+ Interface for prepending a number to the message, used for regression tests. When this class is used directly, no numbers are prepended.
+ """
+ lineno = -1
+
+ @classmethod
+ def reset_lineno(cls, to=0):
+ pass
+
+ @classmethod
+ def incr_lineno(cls):
+ pass
+
+
+class NumberedLogger(NumberedLoggerInterface):
+ """
+ Prepend a number to the message, used for regression test
+ """
+ lineno = -1
+
+ def _log(self, level, msg, args, **kwargs):
+ if NumberedLogger.lineno > 0:
+ msg = f'{self.lineno}: {msg}'
+ super()._log(level, msg, args, **kwargs)
+
+ @classmethod
+ def reset_lineno(cls, to=0):
+ cls.lineno = to
+
+ @classmethod
+ def incr_lineno(cls):
+ cls.lineno += 1
+
+ if (sys.version_info.major, sys.version_info.minor) > (3, 6):
+ def findCaller(self, stack_info=False, stacklevel=1):
+ return super().findCaller(stack_info, stacklevel+1)
+ else:
+ def findCaller(self, stack_info=False):
+ if stack_info:
+ return super().findCaller(stack_info)
+ else:
+ f = sys._getframe(4)
+ co = f.f_code
+ sinfo = None
+ return co.co_filename, f.f_lineno, co.co_name, sinfo
+
+
+class ConsoleCustomHandler(logging.StreamHandler):
+ """
+ A custom handler for console
+
+ Redirects ERROR/WARNING/DEBUG messages to sys.stderr
+ and INFO messages to sys.stdout
+ """
+
+ def emit(self, record):
+ if record.levelno == logging.INFO:
+ stream = sys.stdout
+ else:
+ stream = sys.stderr
+ msg = self.format(record)
+ stream.write(msg)
+ stream.write(self.terminator)
+
+
+class NoBacktraceFormatter(logging.Formatter):
+ """Suppress backtrace unless option debug is set."""
+ def format(self, record):
+ """
+ Format the specified record as text.
+
+ The record's attribute dictionary is used as the operand to a
+ string formatting operation which yields the returned string.
+ Before formatting the dictionary, a couple of preparatory steps
+ are carried out. The message attribute of the record is computed
+ using LogRecord.getMessage(). If the formatting string uses the
+ time (as determined by a call to usesTime()), formatTime() is
+ called to format the event time. If there is exception information,
+ it is formatted using formatException() and appended to the message.
+ """
+ if record.exc_info or record.stack_info:
+ from crmsh import config
+ if config.core.debug:
+ return super().format(record)
+ else:
+ record.message = record.getMessage()
+ if self.usesTime():
+ record.asctime = self.formatTime(record, self.datefmt)
+ return self.formatMessage(record)
+ else:
+ return super().format(record)
+
+
+class ConsoleColoredFormatter(NoBacktraceFormatter):
+ """Print levelname with colors and suppress backtrace."""
+ COLORS = {
+ logging.WARNING: constants.YELLOW,
+ logging.INFO: constants.GREEN,
+ logging.ERROR: constants.RED
+ }
+ FORMAT = "%(levelname)s: %(message)s"
+
+ def __init__(self, fmt=None):
+ if not fmt:
+ fmt = self.FORMAT
+ super().__init__(fmt)
+ self._colored_formatter: typing.Mapping[int, logging.Formatter] = {
+ level: NoBacktraceFormatter(fmt.replace('%(levelname)s', f'{color}%(levelname)s{constants.END}'))
+ for level, color in self.COLORS.items()
+ }
+
+ def format(self, record):
+ colored_formatter = self._colored_formatter.get(record.levelno)
+ if colored_formatter is not None:
+ return colored_formatter.format(record)
+ else:
+ return super().format(record)
+
+
+class LeveledFormatter(logging.Formatter):
+ """Format log according to log level."""
+ def __init__(self, base_formatter_factory, default_fmt: str = None, level_fmt: typing.Mapping[int, str] = None):
+ super().__init__()
+ self.default_formatter = base_formatter_factory(default_fmt)
+ self.level_formatter = {
+ level: base_formatter_factory(fmt)
+ for level, fmt in (level_fmt or {}).items() # tolerate the None default
+ }
+
+ def format(self, record):
+ formatter = self.level_formatter.get(record.levelno)
+ if formatter is None:
+ formatter = self.default_formatter
+ return formatter.format(record)
+
+
+class DebugCustomFilter(logging.Filter):
+ """
+ A custom filter for debug message
+ """
+ def filter(self, record):
+ from .config import core
+ if record.levelno == logging.DEBUG:
+ return core.debug
+ else:
+ return True
+
+
+class ReportDebugCustomFilter(logging.Filter):
+ """
+ A custom filter for crm report debug message
+ """
+ def filter(self, record):
+ from .config import report
+ if record.levelno == logging.DEBUG:
+ return int(report.verbosity) >= 1
+ if record.levelno == DEBUG2:
+ return int(report.verbosity) > 1
+ else:
+ return True
+
+
+class GroupWriteRotatingFileHandler(logging.handlers.RotatingFileHandler):
+ """
+ A custom rotating file handler which keeps log files group writable after rotating
+ Source: https://stackoverflow.com/a/6779307
+ """
+ def _open(self):
+ rtv = super()._open()
+ try:
+ shutil.chown(rtv.name, group=constants.HA_GROUP)
+ os.fchmod(rtv.fileno(), 0o664)
+ shutil.chown(rtv.name, user=constants.HA_USER)
+ except PermissionError:
+ # The file has been open, and FileHandler can write to it.
+ # Failing to change owner or mode is not a fatal error.
+ pass
+ return rtv
+
+
+LOGGING_CFG = {
+ "version": 1,
+ "disable_existing_loggers": "False",
+ "formatters": {
+ "console_report": {
+ "()": LeveledFormatter,
+ "base_formatter_factory": ConsoleColoredFormatter,
+ "default_fmt": "{}: %(levelname)s: %(message)s".format(socket.gethostname()),
+ "level_fmt": {
+ DEBUG2: "{}: %(levelname)s: %(funcName)s: %(message)s".format(socket.gethostname()),
+ },
+ },
+ "console": {
+ "()": LeveledFormatter,
+ "base_formatter_factory": ConsoleColoredFormatter,
+ "default_fmt": "%(levelname)s: %(message)s",
+ "level_fmt": {
+ DEBUG2: "%(levelname)s: %(funcName)s %(message)s",
+ },
+ },
+ "file": {
+ "format": "%(asctime)s {} %(name)s: %(levelname)s: %(message)s".format(socket.gethostname()),
+ "datefmt": "%b %d %H:%M:%S",
+ }
+ },
+ "filters": {
+ "filter": {
+ "()": DebugCustomFilter
+ },
+ "filter_report": {
+ "()": ReportDebugCustomFilter
+ },
+ },
+ "handlers": {
+ 'null': {
+ 'class': 'logging.NullHandler'
+ },
+ "console_report": {
+ "()": ConsoleCustomHandler,
+ "formatter": "console_report",
+ "filters": ["filter_report"]
+ },
+ "console": {
+ "()": ConsoleCustomHandler,
+ "formatter": "console",
+ "filters": ["filter"]
+ },
+ "buffer": {
+ "class": "logging.handlers.MemoryHandler",
+ "capacity": 1024*100,
+ "flushLevel": logging.CRITICAL,
+ },
+ "file": {
+ "()": GroupWriteRotatingFileHandler,
+ "filename": CRMSH_LOG_FILE,
+ "formatter": "file",
+ "filters": ["filter"],
+ "maxBytes": 1*1024*1024,
+ "backupCount": 10
+ }
+ },
+ "loggers": {
+ "crmsh": {
+ "handlers": ["null", "file", "console", "buffer"],
+ "level": "DEBUG"
+ },
+ "crmsh.crash_test": {
+ "handlers": ["null", "file", "console"],
+ "propagate": False,
+ "level": "DEBUG"
+ },
+ "crmsh.report": {
+ "handlers": ["null", "file", "console_report"],
+ "propagate": False,
+ "level": "DEBUG"
+ }
+ }
+}
+
+
+NO_COLOR_FORMATTERS = {
+ "console_report": {
+ "()": LeveledFormatter,
+ "base_formatter_factory": logging.Formatter,
+ "default_fmt": "{}: %(levelname)s: %(message)s".format(socket.gethostname()),
+ "level_fmt": {
+ DEBUG2: "{}: %(levelname)s: %(funcName)s: %(message)s".format(socket.gethostname()),
+ },
+ },
+ "console": {
+ "()": LeveledFormatter,
+ "base_formatter_factory": logging.Formatter,
+ "default_fmt": "%(levelname)s: %(message)s",
+ "level_fmt": {
+ DEBUG2: "%(levelname)s: %(funcName)s %(message)s",
+ },
+ },
+ "file": {
+ "format": "%(asctime)s {} %(name)s: %(levelname)s: %(message)s".format(socket.gethostname()),
+ "datefmt": "%b %d %H:%M:%S",
+ }
+}
+
+
+class LoggerUtils(object):
+ """
+ A class to keep/update some attributes related to the logger.
+ Also has methods related to handlers and formatters,
+ and a set of wrapped log messages for specific scenarios.
+ """
+ def __init__(self, logger: NumberedLogger):
+ """
+ Init function
+ """
+ self.logger = logger
+ # used in regression test
+ self.__save_lineno = 0
+
+ def get_handler(self, _type):
+ """
+ Get logger specific handler
+ """
+ for h in self.logger.handlers:
+ if getattr(h, '_name', None) == _type:
+ return h
+ raise ValueError("Failed to find \"{}\" handler in logger \"{}\"".format(_type, self.logger.name))
+
+ def disable_info_in_console(self):
+ """
+ Set the console log level to WARNING
+ """
+ console_handler = self.get_handler("console")
+ console_handler.setLevel(logging.WARNING)
+
+ def reset_lineno(self, to=0):
+ """
+ Reset line number
+ """
+ self.logger.reset_lineno(to)
+
+ def incr_lineno(self):
+ """
+ Increase line number
+ """
+ self.logger.incr_lineno()
+
+ @contextmanager
+ def only_file(self):
+ """
+ Temporarily log only to file (the console handler is removed)
+ """
+ console_handler = self.get_handler("console")
+ try:
+ self.logger.removeHandler(console_handler)
+ yield
+ finally:
+ self.logger.addHandler(console_handler)
+
+ def log_only_to_file(self, msg, level=logging.INFO):
+ from .config import core
+ if core.debug:
+ self.logger.log(logging.DEBUG, msg)
+ else:
+ with self.only_file():
+ self.logger.log(level, msg)
+
+ @contextmanager
+ def buffer(self):
+ """
+ Buffer log messages in memory and flush them to the console at the end
+ """
+ console_handler = self.get_handler("console")
+ buffer_handler = self.get_handler("buffer")
+ try:
+ # remove console handler temporarily
+ self.logger.removeHandler(console_handler)
+ buffer_handler.buffer.clear()
+ # set the target of buffer handler as console
+ buffer_handler.setTarget(console_handler)
+ yield
+ finally:
+ empty = not buffer_handler.buffer
+ # close the buffer handler (flushing it to the console handler)
+ buffer_handler.close()
+ # add console handler back
+ self.logger.addHandler(console_handler)
+ if not empty and not options.batch:
+ try:
+ input("Press enter to continue... ")
+ except EOFError:
+ pass
+
+ @contextmanager
+ def line_number(self):
+ """
+ Mark the line number in the log record
+ """
+ try:
+ self.__save_lineno = self.logger.lineno
+ self.reset_lineno()
+ yield
+ finally:
+ self.logger.reset_lineno(self.__save_lineno)
+
+ @contextmanager
+ def status_long(self, msg):
+ """
+ Mark a long-running step: log "BEGIN msg" on entry, "END msg" on success and "FAIL msg" on error
+ """
+ self.logger.info("BEGIN %s", msg)
+ try:
+ yield ProgressBar()
+ except Exception:
+ self.logger.error("FAIL %s", msg)
+ raise
+ else:
+ self.logger.info("END %s", msg)
+
+ def wait_input(self, prompt_string, default=""):
+ """
+ Wrap input function with recording prompt string and input result
+ """
+ with self.only_file():
+ self.logger.info(prompt_string)
+ value = input(prompt_string)
+ if not value:
+ value = default
+ with self.only_file():
+ self.logger.info("input result: %s", value)
+ return value
+
+ def confirm(self, msg):
+ """
+ Ask a yes/no question; return True when the answer is "y".
+ The question and the answer are recorded via wait_input.
+ """
+ while True:
+ ans = self.wait_input("{} (y/n)? ".format(msg.strip("? ")))
+ if ans:
+ return ans.lower() == "y"
+
+ def syntax_err(self, s, token='', context='', msg=''):
+ err = "syntax"
+ if context:
+ err += " in {}".format(context)
+ if msg:
+ err += ": {}".format(msg)
+ if isinstance(s, str):
+ err += " parsing '{}'".format(s)
+ elif token:
+ err += " near <{}> parsing '{}'".format(token, ' '.join(s))
+ else:
+ err += " parsing '{}'".format(' '.join(s))
+ self.logger.error(err)
+
+ def no_prog_err(self, name):
+ self.logger.error("%s not available, check your installation", name)
+
+ def unsupported_err(self, name):
+ self.logger.error("%s is not supported", name)
+
+ def missing_obj_err(self, node):
+ self.logger.error("object %s:%s missing (shouldn't have happened)", node.tag, node.get("id"))
+
+ def constraint_norefobj_err(self, constraint_id, obj_id):
+ self.logger.error("constraint %s references a resource %s which doesn't exist", constraint_id, obj_id)
+
+ def no_object_err(self, name):
+ self.logger.error("object %s does not exist", name)
+
+ def invalid_id_err(self, obj_id):
+ self.logger.error("%s: invalid object id", obj_id)
+
+ def id_used_err(self, node_id):
+ self.logger.error("%s: id is already in use", node_id)
+
+ def bad_usage(self, cmd, args, msg=None):
+ if not msg:
+ self.logger.error("Bad usage: '%s %s'", cmd, args)
+ else:
+ self.logger.error("Bad usage: %s, command: '%s %s'", msg, cmd, args)
+
+ def empty_cib_err(self):
+ self.logger.error("No CIB!")
+
+ def text_xml_parse_err(self, msg, s):
+ self.logger.error(msg)
+ self.logger.info("offending string: %s", s)
+
+ def cib_ver_unsupported_err(self, validator, rel):
+ self.logger.error("Unsupported CIB: validator '%s', release '%s'", validator, rel)
+ self.logger.error("To upgrade an old (<1.0) schema, use the upgrade command.")
+
+ def update_err(self, obj_id, cibadmin_opt, xml, rc):
+ CIB_PERMISSION_DENIED_CODE = 54
+ task_table = {"-U": "update", "-D": "delete", "-P": "patch"}
+ task = task_table.get(cibadmin_opt, "replace")
+ self.logger.error("could not %s %s (rc=%d)", task, obj_id, int(rc))
+ if int(rc) == CIB_PERMISSION_DENIED_CODE:
+ self.logger.info("Permission denied.")
+ elif task == "patch":
+ self.logger.info("offending xml diff: %s", xml)
+ else:
+ self.logger.info("offending xml: %s", xml)
+
+
+class ProgressBar:
+ def __init__(self):
+ self._i = 0
+
+ def progress(self):
+ try:
+ width, _ = os.get_terminal_size()
+ except OSError:
+ # not a terminal
+ return
+ self._i = (self._i + 1) % width
+ line = '\r{}{}'.format('.' * self._i, ' ' * (width - self._i))
+ sys.stdout.write(line)
+ sys.stdout.flush()
+
+
+def setup_logging(only_help=False):
+ """
+ Set up the log file and load the logging config dict
+ """
+ # To avoid the potential "permission denied" error under other users (boo#1192754)
+ if only_help:
+ LOGGING_CFG["handlers"]["file"] = {'class': 'logging.NullHandler'}
+ # dirname(CRMSH_LOG_FILE) should be created by package manager during installation
+ try:
+ with open(CRMSH_LOG_FILE, 'a'):
+ pass
+ except (PermissionError, FileNotFoundError) as e:
+ print('{}WARNING:{} Failed to open log file: {}'.format(constants.YELLOW, constants.END, e), file=sys.stderr)
+ LOGGING_CFG["handlers"]["file"] = {'class': 'logging.NullHandler'}
+ logging.addLevelName(DEBUG2, "DEBUG2")
+ if os.environ.get('CRMSH_REGRESSION_TEST'):
+ logging.setLoggerClass(NumberedLogger)
+ LOGGING_CFG['formatters'] = NO_COLOR_FORMATTERS
+ logging.config.dictConfig(LOGGING_CFG)
+ else:
+ logging.setLoggerClass(NumberedLoggerInterface)
+ logging.config.dictConfig(LOGGING_CFG)
+
+
+def setup_logger(name):
+ """
+ Get the logger for the given module name.
+ The parent's handlers are assigned directly so the child
+ logger inherits them even with propagation disabled.
+ """
+ logger = logging.getLogger(name)
+ logger.handlers = logger.parent.handlers
+ logger.propagate = False
+ return logger
+
+
+def setup_report_logger(name):
+ """
+ Get the logger for crm report
+ """
+ logger = setup_logger(name)
+ return logger
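+
+# Typical module-level usage, as seen throughout this changeset
+# (the message text is illustrative):
+#
+# logger = log.setup_logger(__name__)
+# logger_utils = log.LoggerUtils(logger)
+# logger.info("hello")
+# with logger_utils.status_long("doing work") as bar:
+#     bar.progress()  # one dot per call when stdout is a terminal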
diff --git a/crmsh/log_patterns.py b/crmsh/log_patterns.py
new file mode 100644
index 0000000..ac62f14
--- /dev/null
+++ b/crmsh/log_patterns.py
@@ -0,0 +1,287 @@
+# Copyright (C) 2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+#
+# log pattern specification
+#
+# patterns are grouped into one of several classes:
+# - resource: pertaining to a resource
+# - node: pertaining to a node
+# - quorum: quorum changes
+# - events: other interesting events (core dumps, etc)
+#
+# patterns are also grouped by detail level:
+# detail level 0 is the lowest, i.e. it should match the smallest
+# number of relevant messages
+
+# NB:
+# %% stands for whatever user input we get, for instance a
+# resource name or node name or just some regular expression
+# in the optimal case, it should be surrounded by literals
+#
+# [Note that resources may contain clone numbers!]
+
+from . import constants
+from . import utils
+
+__all__ = ('patterns',)
+
+_patterns_old = {
+ "resource": (
+ ( # detail 0
+ "lrmd.*%% (?:start|stop|promote|demote|migrate)",
+ "lrmd.*RA output: .%%:.*:stderr",
+ "lrmd.*WARN: Managed %%:.*exited",
+ "lrmd.*WARN: .* %% .*timed out$",
+ "crmd.*LRM operation %%_(?:start|stop|promote|demote|migrate)_.*confirmed=true",
+ "crmd.*LRM operation %%_.*Timed Out",
+ "[(]%%[)]\[",
+ ),
+ ( # detail 1
+ "lrmd.*%% (?:probe|notify)",
+ "lrmd.*Managed %%:.*exited",
+ ),
+ ),
+ "node": (
+ ( # detail 0
+ " %% .*Corosync.Cluster.Engine",
+ " %% .*Executive.Service.RELEASE",
+ " %% .*Requesting.shutdown",
+ " %% .*Shutdown.complete",
+ " %% .*Configuration.validated..Starting.heartbeat",
+ "pengine.*Scheduling Node %% for STONITH",
+ "crmd.* of %% failed",
+ "stonith-ng.*host '%%'",
+ "Exec.*on %% ",
+ "Node %% will be fenced",
+ "stonith-ng.*for %% timed",
+ "stonith-ng.*can not fence %%:",
+ "stonithd.*Succeeded.*node %%:",
+ "(?:lost|memb): %% ",
+ "crmd.*(?:NEW|LOST):.* %% ",
+ "Node return implies stonith of %% ",
+ ),
+ ( # detail 1
+ ),
+ ),
+ "quorum": (
+ ( # detail 0
+ "crmd.*Updating.quorum.status",
+ "crmd.*quorum.(?:lost|ac?quir)",
+ ),
+ ( # detail 1
+ ),
+ ),
+ "events": (
+ ( # detail 0
+ "CRIT:",
+ "ERROR:",
+ ),
+ ( # detail 1
+ "WARN:",
+ ),
+ ),
+}
+
+_patterns_118 = {
+ "resource": (
+ ( # detail 0
+ "crmd.*Initiating.*%%_(?:start|stop|promote|demote|migrate)_",
+ "lrmd.*operation_finished: %%_",
+ "lrmd.*executing - rsc:%% action:(?:start|stop|promote|demote|migrate)",
+ "lrmd.*finished - rsc:%% action:(?:start|stop|promote|demote|migrate)",
+
+ "crmd.*LRM operation %%_(?:start|stop|promote|demote|migrate)_.*confirmed=true",
+ "crmd.*LRM operation %%_.*Timed Out",
+ "[(]%%[)]\[",
+ ),
+ ( # detail 1
+ "crmd.*Initiating.*%%_(?:monitor_0|notify)",
+ "lrmd.*executing - rsc:%% action:(?:monitor_0|notify)",
+ "lrmd.*finished - rsc:%% action:(?:monitor_0|notify)",
+ ),
+ ),
+ "node": (
+ ( # detail 0
+ " %% .*Corosync.Cluster.Engine",
+ " %% .*Executive.Service.RELEASE",
+ " %% .*crm_shutdown:.Requesting.shutdown",
+ " %% .*pcmk_shutdown:.Shutdown.complete",
+ " %% .*Configuration.validated..Starting.heartbeat",
+ "pengine.*Scheduling Node %% for STONITH",
+ "pengine.*Node %% will be fenced",
+ "crmd.*for %% failed",
+ "stonith-ng.*host '%%'",
+ "Exec.*on %% ",
+ "Node %% will be fenced",
+ "stonith-ng.*on %% for.*timed out",
+ "stonith-ng.*can not fence %%:",
+ "stonithd.*Succeeded.*node %%:",
+ "(?:lost|memb): %% ",
+ "crmd.*(?:NEW|LOST|new|lost):.* %% ",
+ "Node return implies stonith of %% ",
+ ),
+ ( # detail 1
+ ),
+ ),
+ "quorum": (
+ ( # detail 0
+ "crmd.*Updating.(quorum).status",
+ r"crmd.*quorum.(?:lost|ac?quir[^\s]*)",
+ ),
+ ( # detail 1
+ ),
+ ),
+ "events": (
+ ( # detail 0
+ "(CRIT|crit|ERROR|error|UNCLEAN|unclean):",
+ ),
+ ( # detail 1
+ "(WARN|warning):",
+ ),
+ ),
+}
+
+_patterns_200 = {
+ "resource": (
+ ( # detail 0
+ "pacemaker-controld.*Initiating.*%%_(?:start|stop|promote|demote|migrate)_",
+ "pacemaker-execd.*operation_finished: %%_",
+ "pacemaker-execd.*executing - rsc:%% action:(?:start|stop|promote|demote|migrate)",
+ "pacemaker-execd.*finished - rsc:%% action:(?:start|stop|promote|demote|migrate)",
+
+ "pacemaker-controld.*Result of .* operation for .* on .*: .*confirmed=true",
+ "pacemaker-controld.*Result of .* operation for .* on .*: Timed Out",
+ "[(]%%[)]\[",
+ ),
+ ( # detail 1
+ "pacemaker-controld.*Initiating.*%%_(?:monitor_0|notify)",
+ "pacemaker-execd.*executing - rsc:%% action:(?:monitor_0|notify)",
+ "pacemaker-execd.*finished - rsc:%% action:(?:monitor_0|notify)",
+ ),
+ ),
+ "node": (
+ ( # detail 0
+ " %% .*Corosync.Cluster.Engine",
+ " %% .*Executive.Service.RELEASE",
+ " %% .*crm_shutdown:.Requesting.shutdown",
+ " %% .*pcmk_shutdown:.Shutdown.complete",
+ " %% .*Configuration.validated..Starting.heartbeat",
+ "schedulerd.*Scheduling Node %% for STONITH",
+ "schedulerd.*will be fenced",
+ "pacemaker-controld.*for %% failed",
+ "stonith-ng.*host '%%'",
+ "Exec.*on %% ",
+ " %% will be fenced",
+ "stonith-ng.*on %% for.*timed out",
+ "stonith-ng.*can not fence %%:",
+ "pacemaker-fenced.*Succeeded.*node %%:",
+ "fenced.*(requests|(Succeeded|Failed).to.|result=)",
+ "(?:lost|memb): %% ",
+ "pacemaker-controld.*(?:NEW|LOST|new|lost):.* %% ",
+ r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
+ r"Fencing daemon connection failed",
+ r"pacemaker-controld.*Fencer successfully connected",
+ "State transition .* S_RECOVERY",
+ r"pacemakerd.* Respawning pacemaker-controld subdaemon after unexpected exit",
+ r"pacemaker-controld\[[0-9]+\] exited with status 1 \(",
+ r"Connection to the scheduler failed",
+ "pacemaker-controld.*I_ERROR.*save_cib_contents",
+ r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
+ "pacemaker-controld.*Could not recover from internal error",
+ r"pacemaker-controld.*Connection to executor failed",
+ r"pacemaker-controld.*I_ERROR.*lrm_connection_destroy",
+ r"pacemaker-controld.*State transition .* S_RECOVERY",
+ r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
+ r"pacemaker-controld.*Could not recover from internal error",
+ r"pacemakerd.*pacemaker-controld\[[0-9]+\] exited with status 1",
+ r"pacemakerd.* Respawning pacemaker-execd subdaemon after unexpected exit",
+ r"pacemakerd.* Respawning pacemaker-controld subdaemon after unexpected exit",
+ r"pacemakerd.* pacemaker-attrd\[[0-9]+\] exited with status 102",
+ r"pacemakerd.* pacemaker-controld\[[0-9]+\] exited with status 1",
+ r"pacemakerd.* Respawning pacemaker-attrd subdaemon after unexpected exit",
+ r"pacemakerd.* Respawning pacemaker-based subdaemon after unexpected exit",
+ r"pacemakerd.* Respawning pacemaker-controld subdaemon after unexpected exit",
+ r"pacemakerd.* Respawning pacemaker-fenced subdaemon after unexpected exit",
+ r"pacemaker-.* Connection to cib_.* (failed|closed)",
+ r"pacemaker-attrd.*:.*Lost connection to the CIB manager",
+ r"pacemaker-controld.*:.*Lost connection to the CIB manager",
+ r"pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
+ r"pacemaker-controld.* State transition .* S_RECOVERY",
+ r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
+ r"pacemaker-controld.*Could not recover from internal error",
+ ),
+ ( # detail 1
+ ),
+ ),
+ "quorum": (
+ ( # detail 0
+ "pacemaker-controld.*Updating.(quorum).status",
+ r"pacemaker-controld.*quorum.(?:lost|ac?quir[^\s]*)",
+ r"pacemakerd.*:\s*warning:.*Lost connection to cluster layer",
+ r"pacemaker-attrd.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
+ r"pacemaker-based.*:\s*(crit|error):.*Lost connection to cluster layer",
+ r"pacemaker-controld.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
+ r"pacemaker-fenced.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
+ r"schedulerd.*Scheduling node .* for fencing",
+ r"pacemaker-controld.*:\s*Peer .* was terminated \(.*\) by .* on behalf of .*:\s*OK",
+ ),
+ ( # detail 1
+ ),
+ ),
+ "events": (
+ ( # detail 0
+ "(CRIT|crit|ERROR|error|UNCLEAN|unclean):",
+ r"Shutting down...NOW",
+ r"Timer I_TERMINATE just popped",
+ r"input=I_ERROR",
+ r"input=I_FAIL",
+ r"input=I_INTEGRATED cause=C_TIMER_POPPED",
+ r"input=I_FINALIZED cause=C_TIMER_POPPED",
+ r"input=I_ERROR",
+ r"(pacemakerd|pacemaker-execd|pacemaker-controld):.*, exiting",
+ r"schedulerd.*Attempting recovery of resource",
+ r"is taking more than 2x its timeout",
+ r"Confirm not received from",
+ r"Welcome reply not received from",
+ r"Attempting to schedule .* after a stop",
+ r"Resource .* was active at shutdown",
+ r"duplicate entries for call_id",
+ r"Search terminated:",
+ r":global_timer_callback",
+ r"Faking parameter digest creation",
+ r"Parameters to .* action changed:",
+ r"Parameters to .* changed",
+ r"pacemakerd.*\[[0-9]+\] terminated( with signal| as IPC server|$)",
+ r"pacemaker-schedulerd.*Recover\s+.*\(.* -\> .*\)",
+ r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting",
+ r"Peer is not part of our cluster",
+ r"We appear to be in an election loop",
+ r"Unknown node -> we will not deliver message",
+ r"(Blackbox dump requested|Problem detected)",
+ r"pacemakerd.*Could not connect to Cluster Configuration Database API",
+ r"Receiving messages from a node we think is dead",
+ r"share the same cluster nodeid",
+ r"share the same name",
+ r"pacemaker-controld:.*Transition failed: terminated",
+ r"Local CIB .* differs from .*:",
+ r"warn.*:\s*Continuing but .* will NOT be used",
+ r"warn.*:\s*Cluster configuration file .* is corrupt",
+ #r"Executing .* fencing operation",
+ r"Election storm",
+ r"stalled the FSA with pending inputs",
+ ),
+ ( # detail 1
+ "(WARN|warning):",
+ ),
+ ),
+}
+
+
+def patterns(cib_f=None):
+ if utils.is_min_pcmk_ver(constants.PCMK_VERSION_DEFAULT, cib_f=cib_f):
+ return _patterns_200
+ is118 = utils.is_larger_than_pcmk_118(cib_f=cib_f)
+ if is118:
+ return _patterns_118
+ else:
+ return _patterns_old
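+
+# Callers pick the pattern set matching the pacemaker version detected
+# from the CIB, e.g. (illustrative): patterns(cib_f="cib.xml")["resource"][0]
+# is the detail-0 tuple of resource-event regexes.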
diff --git a/crmsh/logparser.py b/crmsh/logparser.py
new file mode 100644
index 0000000..e170f9c
--- /dev/null
+++ b/crmsh/logparser.py
@@ -0,0 +1,641 @@
+# Copyright (C) 2016 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import bz2
+import gzip
+import re
+import os
+import sys
+import collections
+import json
+import time
+
+from . import xmlutil
+from . import logtime
+from . import utils
+from . import log_patterns
+from . import log
+
+
+logger = log.setup_logger(__name__)
+
+
+_METADATA_FILENAME = "__meta.json"
+_METADATA_CACHE_AGE = (60.0 * 60.0)
+# Update this when changing the metadata format
+_METADATA_VERSION = 1
+
+
+def _open_logfile(logfile):
+ """
+ Open a file which may be gz|bz2 compressed.
+ Uncompress based on extension.
+ """
+ try:
+ if logfile.endswith(".bz2"):
+ return bz2.BZ2File(logfile)
+ if logfile.endswith(".gz"):
+ return gzip.open(logfile)
+ return open(logfile, "rb")
+ except IOError as msg:
+ logger.error("open %s: %s", logfile, msg)
+ return None
+
+
+def _transition_start_re():
+ """
+ Return a regular expression matching transition start lines.
+ The RE is an alternation of two message forms; each form has
+ three groups:
+ 1: transition number
+ 2: full path of pe file
+ 3: pe file number
+ """
+ m1 = "pacemaker-controld.*Processing graph ([0-9]+).*derived from (.*/pe-[^-]+-([0-9]+)[.]bz2)"
+ m2 = "pacemaker-schedulerd.*[Tt]ransition ([0-9]+).*([^ ]*/pe-[^-]+-([0-9]+)[.]bz2)"
+ try:
+ return re.compile("(?:%s)|(?:%s)" % (m1, m2))
+ except re.error as e:
+ logger.debug("RE compilation failed: %s", e)
+ raise ValueError("Error in search expression")
+
+
+def pefile_shortname(pe_file):
+ return os.path.basename(pe_file).replace(".bz2", "")
+
+
+def trans_str(node, pe_file):
+ '''Convert node,pe_file to transition string.'''
+ return "%s:%s" % (node, pefile_shortname(pe_file))
+
+
+def _transition_end_re():
+ """
+ Return RE matching transition end.
+ See transition_start_re for more details.
+
+ 1: trans_num
+ 2: pe_file
+ 3: pe_num
+ 4: state
+ """
+ try:
+ return re.compile("pacemaker-controld.*Transition ([0-9]+).*Source=(.*/pe-[^-]+-([0-9]+)[.]bz2).:.*(Stopped|Complete|Terminated)")
+ except re.error as e:
+ logger.debug("RE compilation failed: %s", e)
+ raise ValueError("Error in search expression")
+
+
+_GRAPH_ACTIONS_RE = re.compile("([A-Z][a-z]+)=([0-9]+)")
+
+
+def _run_graph_msg_actions(msg):
+ '''
+ crmd: [13667]: info: run_graph: Transition 399 (Complete=5,
+ Pending=1, Fired=1, Skipped=0, Incomplete=3,
+ Source=...
+ Returns dict: d[Pending]=np, d[Fired]=nf, ...
+ Only stores non-zero values.
+ '''
+ d = {}
+ s = msg
+ r = _GRAPH_ACTIONS_RE.search(s)
+ while r:
+ val = int(r.group(2))
+ if val != 0:
+ d[r.group(1)] = val
+ s = s[r.end():]
+ r = _GRAPH_ACTIONS_RE.search(s)
+ return d
+
+
+def mk_re_list(patt_l, repl):
+ 'Build a list of regular expressions, replace "%%" with repl'
+ l = []
+ for re_l in patt_l:
+ l += [x.replace("%%", repl) for x in re_l]
+ if not repl:
+ l = [x.replace(".*.*", ".*") for x in l]
+ return l
+
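+# For example (illustrative): mk_re_list([("lrmd.*%% start",)], "d0")
+# yields ["lrmd.*d0 start"]; with an empty replacement, any resulting
+# ".*.*" runs are collapsed to ".*".
+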
+
+class Transition(object):
+ __slots__ = ('loc', 'dc', 'start_ts', 'trans_num', 'pe_file', 'pe_num', 'end_ts', 'end_state', 'end_actions', 'tags')
+
+ def __init__(self, loc, dc, start_ts, trans_num, pe_file, pe_num):
+ self.loc = loc
+ self.dc = dc
+ self.start_ts = start_ts
+ self.trans_num = trans_num
+ self.pe_file = pe_file
+ self.pe_num = pe_num
+ self.end_ts = None
+ self.end_state = None
+ self.end_actions = None
+ self.tags = set()
+
+ def __str__(self):
+ return trans_str(self.dc, self.pe_file)
+
+ def shortname(self):
+ return pefile_shortname(self.pe_file)
+
+ def actions(self):
+ return self.end_actions
+
+ def actions_count(self):
+ if self.end_actions is not None:
+ return sum(self.end_actions.values())
+ return -1
+
+ def path(self):
+ return os.path.join(self.loc, self.dc, "pengine", self.pe_file)
+
+ def description(self):
+ s = "%s %s - %s: %-15s %-15s %s" % (
+ utils.shortdate(self.start_ts),
+ utils.shorttime(self.start_ts),
+ self.end_ts and utils.shorttime(self.end_ts) or "--:--:--",
+ self.shortname(),
+ self.dc,
+ " ".join(sorted(self.tags))
+ )
+ return s
+
+ def empty(self, prev):
+ """
+ True if this transition resulted in no actions and no CIB changes
+ prev: previous transition
+ """
+ old_pe_l_file = prev.path()
+ new_pe_l_file = self.path()
+ no_actions = self.actions_count() == 0
+ if not os.path.isfile(old_pe_l_file) or not os.path.isfile(new_pe_l_file):
+ return no_actions
+ old_cib = xmlutil.compressed_file_to_cib(old_pe_l_file)
+ new_cib = xmlutil.compressed_file_to_cib(new_pe_l_file)
+ if old_cib is None or new_cib is None:
+ return no_actions
+ prev_epoch = old_cib.attrib.get("epoch", "0")
+ epoch = new_cib.attrib.get("epoch", "0")
+ prev_admin_epoch = old_cib.attrib.get("admin_epoch", "0")
+ admin_epoch = new_cib.attrib.get("admin_epoch", "0")
+ return no_actions and epoch == prev_epoch and admin_epoch == prev_admin_epoch
+
+ def transition_info(self):
+ print("Transition %s (%s -" % (self, utils.shorttime(self.start_ts)), end=' ')
+ if self.end_ts:
+ print("%s):" % utils.shorttime(self.end_ts))
+ act_d = self.actions()
+ total = self.actions_count()
+ s = ", ".join(["%d %s" % (act_d[x], x) for x in act_d if act_d[x]])
+ print("\ttotal %d actions: %s" % (total, s))
+ else:
+ print("[unfinished])")
+
+ def to_dict(self):
+ """
+ Serialize to dict (for cache)
+ """
+ o = {"tags": list(self.tags)}
+ for k in self.__slots__:
+ if k in ("loc", "tags"):
+ continue
+ o[k] = getattr(self, k)
+ return o
+
+ @classmethod
+ def from_dict(cls, loc, obj):
+ t = Transition(loc, None, None, None, None, None)
+ for k, v in obj.items():
+ setattr(t, k, set(v) if k == "tags" else v)
+ return t
+
+
+class CibInfo(object):
+ def __init__(self, report_path):
+ self.filename = utils.file_find_by_name(report_path, "cib.xml")
+ self.nodes = []
+ self.primitives = []
+ self.groups = {}
+ self.clones = {}
+ self.cloned_resources = set()
+ self.not_cloned_resources = set()
+
+ cib_elem = None
+ if self.filename:
+ cib_elem = xmlutil.file2cib_elem(self.filename)
+
+ if cib_elem is None:
+ self.nodes = utils.list_cluster_nodes()
+ return
+
+ self.nodes = [x.get("uname") or x.get("id") for x in cib_elem.xpath("/cib/configuration/nodes/node")]
+
+ self.primitives = [x.get("id") for x in cib_elem.xpath("/cib/configuration/resources//primitive")]
+
+ for grp in cib_elem.xpath("/cib/configuration/resources/group"):
+ self.groups[grp.get("id")] = xmlutil.get_rsc_children_ids(grp)
+
+ for cln in cib_elem.xpath("/cib/configuration/resources/*[self::clone or self::master]"):
+ self.clones[cln.get("id")] = xmlutil.get_prim_children_ids(cln)
+ self.cloned_resources.update(self.clones[cln.get("id")])
+
+ self.not_cloned_resources = set(x for x in self.primitives if x not in self.cloned_resources)
+
+ def resources(self):
+ return self.primitives + list(self.groups.keys()) + list(self.clones.keys())
+
+ def match_resources(self):
+ """
+ list of regular expressions matching resources
+ """
+ rsc_l = list(self.not_cloned_resources)
+ rsc_l += ["%s(?::[0-9]+)?" % x for x in self.cloned_resources]
+ return rsc_l
+
+
+class LogParser(object):
+ """
+ Used by the history explorer.
+ Given a report directory, generates log metadata.
+
+ TODO:
+
+ This information is then written to a file named by _METADATA_FILENAME,
+ and the next time the history explorer is started, we skip the
+ analysis and load the metadata directly.
+
+ The analysis is done over the complete log: Timeframe narrowing happens elsewhere.
+ """
+
+ def __init__(self, loc, cib, logfiles, detail):
+ """
+ report_root: Base directory of the report
+ """
+ self.loc = loc
+ self.cib = cib
+ self.filenames = logfiles
+ self.fileobjs = [_open_logfile(f) for f in logfiles]
+ self.detail = detail
+
+ self.events = {}
+ self.transitions = []
+
+ self.from_ts = None
+ self.to_ts = None
+
+ def __del__(self):
+ for f in self.fileobjs:
+ if f is not None: # _open_logfile may have returned None
+ f.close()
+
+ def scan(self, mode=None):
+ """
+ mode = 'refresh':
+ Re-read logs that may have new data appended.
+ Right now this re-scans all the log data.
+ TODO: Only scan new data by tracking the previous
+ end of each file and scanning from there. Retain
+ previous data and just add new transitions / events.
+
+ Returns list of pefiles missing from report. [(node, [pefile ...]) ...]
+
+ mode = 'force':
+ Completely re-parse (ignore cache)
+ """
+ with utils.nogc():
+ return self._scan(mode=mode)
+
+ def _scan(self, mode):
+ """
+ Scan logs and generate metadata for transitions,
+ tags and events (used when retrieving log lines later).
+
+ Returns list of pefiles missing from report. [(node, [pefile ...]) ...]
+
+ mode: None, 'refresh' or 'force'
+
+ TODO: Load/save metadata when already generated.
+ TODO: scan each logfile in a separate thread?
+ """
+
+ if mode not in ('refresh', 'force') and self._load_cache():
+ return []
+
+ missing_pefiles = []
+
+ # {etype -> [(sortkey, msg)]}
+ # TODO: store (sortkey, fileid, spos) instead?
+ self.events = collections.defaultdict(list)
+
+ self.transitions = []
+
+ # trans_num:pe_num -> Transition()
+ transitions_map = {}
+
+ startre = _transition_start_re()
+ endre = _transition_end_re()
+
+ eventre = {}
+ eventre["node"] = self._build_re("node", self.cib.nodes)
+ eventre["resource"] = self._build_re("resource", self.cib.match_resources())
+ eventre["quorum"] = self._build_re("quorum", [])
+ eventre["events"] = self._build_re("events", [])
+
+ DEFAULT, IN_TRANSITION = 0, 1
+ state = DEFAULT
+ transition = None
+
+ for logidx, (filename, log) in enumerate(zip(self.filenames, self.fileobjs)):
+ log.seek(0)
+ logger.debug("parsing %s", filename)
+ line = "a"
+ while line != '':
+ spos = log.tell()
+ line = utils.to_ascii(log.readline())
+ m = startre.search(line)
+ if m:
+ # m.groups() is (transnum1, pefile1, penum1, transnum2, pefile2, penum2) where
+ # it matched either 1 or 2
+ t1, p1, n1, t2, p2, n2 = m.groups()
+ if t1 is not None:
+ trans_num, pe_file, pe_num = t1, p1, n1
+ else:
+ trans_num, pe_file, pe_num = t2, p2, n2
+ pe_orig = pe_file
+ pe_file = os.path.basename(pe_orig)
+ ts, dc = logtime.syslog_ts_node(line)
+ if ts is None or dc is None:
+ continue
+ id_ = trans_str(dc, pe_file)
+ transition = transitions_map.get(id_)
+ if transition is None:
+ transition = Transition(self.loc, dc, ts, trans_num, pe_file, pe_num)
+ self.transitions.append(transition)
+ transitions_map[id_] = transition
+ logger.debug("{Transition: %s", transition)
+
+ if not os.path.isfile(transition.path()):
+ missing_pefiles.append((dc, pe_orig))
+ else:
+ logger.debug("~Transition: %s old(%s, %s) new(%s, %s)", transition, transition.trans_num, transition.pe_file, trans_num, pe_file)
+ state = IN_TRANSITION
+ continue
+ if state == IN_TRANSITION:
+ m = endre.search(line)
+ if m:
+ trans_num, pe_file, pe_num, state = m.groups()
+ pe_file = os.path.basename(pe_file)
+ ts, dc = logtime.syslog_ts_node(line)
+ if ts is None or dc is None:
+ continue
+ transition = transitions_map.get(trans_str(dc, pe_file))
+ if transition is None:
+ # transition end without previous begin...
+ logger.debug("Found transition end without start: %s: %s - %s:%s", ts, filename, trans_num, pe_file)
+ else:
+ transition.end_state = state
+ transition.end_ts = ts
+ transition.end_actions = _run_graph_msg_actions(line)
+ logger.debug("}Transition: %s %s", transition, state)
+ state = DEFAULT
+
+ # events
+ for etype, erx in eventre.items():
+ for rx in erx:
+ m = rx.search(line)
+ if m:
+ ts = logtime.syslog_ts(line)
+ if ts is None:
+ continue
+ logger.debug("+Event %s: %s: %s", etype, ", ".join(m.groups()), line.strip('\n'))
+ sk = (int(ts) << 32) + int(spos)
+ self.events[etype].append((sk, logidx, spos))
+ if transition is not None:
+ for t in m.groups():
+ if t:
+ transition.tags.add(t.lower())
+
+ if state == DEFAULT:
+ transition = None
+
+ self.transitions.sort(key=lambda t: t.start_ts)
+ for etype, logs in self.events.items():
+ logs.sort(key=lambda e: e[0])
+ empties = []
+ for i, t in enumerate(self.transitions):
+ if i == 0:
+ continue
+ if t.empty(self.transitions[i - 1]):
+ empties.append(t)
+ self.transitions = [t for t in self.transitions if t not in empties]
+ self._save_cache()
+ if missing_pefiles:
+ rdict = collections.defaultdict(list)
+ for node, pe in missing_pefiles:
+ rdict[node].append(pe)
+ missing_pefiles = list(rdict.items())
+ return missing_pefiles
+
+ def set_timeframe(self, from_t, to_t):
+ """
+ from_t, to_t: timestamps or datetime objects
+ """
+ self.from_ts = logtime.make_time(from_t)
+ self.to_ts = logtime.make_time(to_t)
+
+ def get_logs(self, nodes=None):
+ """
+ Generator which yields a list of log messages limited by the
+ list of nodes, or from all nodes.
+
+ The log lines are yielded in order, by reading from
+ all files at once and always yielding the line with
+ the lowest timestamp
+ """
+
+ def include_log(logfile):
+ return not nodes or os.path.basename(os.path.dirname(logfile)) in nodes
+
+ for f in self.fileobjs:
+ f.seek(0)
+
+ # apply the nodes filter: read only logs from the requested nodes
+ lines = [[None, utils.to_ascii(f.readline()), f]
+ for fn, f in zip(self.filenames, self.fileobjs)
+ if include_log(fn)]
+ for i, line in enumerate(lines):
+ if not line[1]:
+ line[0], line[2] = sys.float_info.max, None
+ else:
+ line[0] = logtime.syslog_ts(line[1])
+
+ while any(f is not None for _, _, f in lines):
+ x = min(lines, key=lambda v: v[0])
+ if x[0] is None or x[2] is None:
+ break
+ if self.to_ts and x[0] > self.to_ts:
+ break
+ if not (self.from_ts and x[0] < self.from_ts):
+ yield x[1]
+ x[1] = utils.to_ascii(x[2].readline())
+ if not x[1]:
+ x[0], x[2] = sys.float_info.max, None
+ else:
+ x[0] = logtime.syslog_ts(x[1])
+
+ def get_events(self, event=None, nodes=None, resources=None):
+ """
+ Generator which outputs matching event lines
+ event: optional node, resource, quorum
+ nodes: optional list of nodes
+ resources: optional list of resources
+
+ TODO: ordering, time limits
+ """
+ if event is not None:
+ eventlogs = [event]
+ else:
+ eventlogs = sorted(list(self.events.keys()))
+
+ if nodes:
+ rxes = self._build_re(event, nodes)
+ elif resources:
+ expanded_l = []
+ for r in resources:
+ if r in self.cib.groups:
+ expanded_l += self.cib.groups[r]
+ elif r in self.cib.clones:
+ expanded_l += self.cib.clones[r]
+ else:
+ expanded_l.append(r)
+
+ def clonify(r):
+ return r + "(?::[0-9]+)?" if r in self.cib.cloned_resources else r
+ expanded_l = [clonify(r) for r in expanded_l]
+ rxes = self._build_re(event, expanded_l)
+ else:
+ rxes = None
+
+ if event == "resource" and resources is not None and rxes is not None:
+ logger.debug("resource %s rxes: %s", ", ".join(resources), ", ".join(r.pattern for r in rxes))
+
+ if rxes is not None:
+ for log in eventlogs:
+ for _, f, pos in self.events.get(log, []):
+ self.fileobjs[f].seek(pos)
+ msg = utils.to_ascii(self.fileobjs[f].readline())
+ if any(rx.search(msg) for rx in rxes):
+ ts = logtime.syslog_ts(msg)
+ if not (self.from_ts and ts < self.from_ts) and not (self.to_ts and ts > self.to_ts):
+ yield msg
+ else:
+ for log in eventlogs:
+ for _, f, pos in self.events.get(log, []):
+ self.fileobjs[f].seek(pos)
+ msg = utils.to_ascii(self.fileobjs[f].readline())
+ ts = logtime.syslog_ts(msg)
+ if not (self.from_ts and ts < self.from_ts) and not (self.to_ts and ts > self.to_ts):
+ yield msg
+
+ def get_transitions(self):
+ """
+ Yields transitions within the current timeframe
+ """
+ for t in self.transitions:
+ if not (self.from_ts and t.end_ts and t.end_ts < self.from_ts) and not (self.to_ts and t.start_ts and t.start_ts > self.to_ts):
+ yield t
+
+ def _get_patt_l(self, etype):
+ '''
+ get the list of patterns for this type, up to and
+ including current detail level
+ '''
+ patterns = log_patterns.patterns(cib_f=self.cib.filename)
+ if etype not in patterns:
+ logger.error("%s not featured in log patterns", etype)
+ return None
+ return patterns[etype][0:self.detail+1]
+
+ def _build_re(self, etype, args):
+ '''
+ Prepare a regex string for the type and args.
+ For instance, "resource" and rsc1, rsc2, ...
+ '''
+ patt_l = self._get_patt_l(etype)
+ if not patt_l:
+ return None
+ if not args:
+ re_l = mk_re_list(patt_l, "")
+ else:
+ re_l = mk_re_list(patt_l, r'(%s)' % "|".join(args))
+ return [re.compile(r) for r in re_l]
+
+ def to_dict(self):
+ """
+ Serialize self to dict (including transition objects)
+ """
+ o = {
+ "version": _METADATA_VERSION,
+ "events": self.events,
+ "transitions": [t.to_dict() for t in self.transitions],
+ "cib": {
+ "nodes": self.cib.nodes,
+ "primitives": self.cib.primitives,
+ "groups": self.cib.groups,
+ "clones": self.cib.clones
+ }
+ }
+ return o
+
+ def from_dict(self, obj):
+ """
+ Load from dict
+ """
+ if "version" not in obj or obj["version"] != _METADATA_VERSION:
+ return False
+ self.events = obj["events"]
+ self.transitions = [Transition.from_dict(self.loc, t) for t in obj["transitions"]]
+ return True
+
+ def _metafile(self):
+ return os.path.join(self.loc, _METADATA_FILENAME)
+
+ def count(self):
+ """
+ Returns (num transitions, num events)
+ """
+ return len(self.transitions), sum(len(e) for e in list(self.events.values()))
+
+ def _save_cache(self):
+ """
+ Save state to cache file
+ """
+ fn = self._metafile()
+ try:
+ with open(fn, 'wt') as f:
+ json.dump(self.to_dict(), f, indent=2)
+ logger.debug("Transition metadata saved to %s", fn)
+ except IOError as e:
+ logger.debug("Could not update metadata cache: %s", e)
+
+ def _load_cache(self):
+ """
+ Load state from cache file
+ """
+ fn = self._metafile()
+ if os.path.isfile(fn):
+ meta_mtime = os.stat(fn).st_mtime
+ logf_mtime = max([os.stat(f).st_mtime for f in self.filenames if os.path.isfile(f)])
+
+ if meta_mtime >= logf_mtime and time.time() - meta_mtime < _METADATA_CACHE_AGE:
+ try:
+ with open(fn, 'r') as f:
+ try:
+ if not self.from_dict(json.load(f)):
+ return False
+ logger.debug("Transition metadata loaded from %s", fn)
+ return True
+ except ValueError as e:
+ logger.debug("Failed to load metadata: %s", e)
+ except IOError as e:
+ logger.debug("Could not read metadata cache: %s", e)
+ return False
+ return False
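+
+# A rough usage sketch (illustrative paths; the history explorer is the
+# real driver of this class):
+#
+# cib = CibInfo("/path/to/report")
+# parser = LogParser("/path/to/report", cib, ["node1/ha-log.txt"], detail=0)
+# missing = parser.scan()  # [(node, [pefile, ...]), ...]
+# parser.set_timeframe(None, None)  # or timestamps / datetime objects
+# for line in parser.get_events(event="resource", resources=["r0"]):
+#     print(line, end="")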
diff --git a/crmsh/logtime.py b/crmsh/logtime.py
new file mode 100644
index 0000000..1062c88
--- /dev/null
+++ b/crmsh/logtime.py
@@ -0,0 +1,232 @@
+# Copyright (C) 2013-2016 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+"""
+Helpers for handling log timestamps.
+"""
+
+import re
+import time
+import datetime
+from . import utils
+from . import log
+
+
+logger = log.setup_logger(__name__)
+
+
+YEAR = None
+
+
+def set_year(ts=None):
+ '''
+ ts: optional time in seconds
+ '''
+ global YEAR
+ year = time.strftime("%Y", time.localtime(ts))
+ if YEAR is not None:
+ t = (" (ts: %s)" % (ts)) if ts is not None else ""
+ logger.debug("history: setting year to %s%s", year, t)
+ YEAR = year
+
+
+def human_date(dt=None):
+ '''
+ Convert datetime argument into a presentational string.
+
+ dt: Datetime (default: now)
+ '''
+ if dt is None:
+ dt = utils.make_datetime_naive(datetime.datetime.now())
+ # here, dt is in UTC. Convert to localtime:
+ localdt = datetime.datetime.fromtimestamp(utils.datetime_to_timestamp(dt))
+ # drop microseconds
+ return re.sub("[.].*", "", "%s %s" % (localdt.date(), localdt.time()))
+
+
+def make_time(t):
+ '''
+ t: time in seconds / datetime / other
+ returns: time in floating point
+ '''
+ if t is None:
+ return None
+ elif isinstance(t, datetime.datetime):
+ return utils.datetime_to_timestamp(t)
+ return t
+
+
+# fmt1: group 11 is node
+# fmt2: group 2 is node
+# fmt3: group 2 is node
+# fmt4: no node group available
+# fmt5: group 2 is node
+_syslog2node_formats = (re.compile(r'^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(?:.(\d+))?([+-])(\d{2}):?(\d{2})\s+(?:\[\d+\])?\s*([\S]+)'),
+ re.compile(r'^(\d{4}-\d{2}-\d{2}T\S+)\s+(?:\[\d+\])?\s*([\S]+)'),
+ re.compile(r'^([a-zA-Z]{2,4}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2})\s+(?:\[\d+\])?\s*([\S]+)'),
+ re.compile(r'^(\d{4}\/\d{2}\/\d{2}_\d{2}:\d{2}:\d{2})'),
+ re.compile(r'^([A-Z][a-z]+ \d{1,2} \d{2}:\d{2}:\d{2}\.\d+) ([\S]+)'))
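+
+# Illustrative lines each pattern is meant to match (hostnames and
+# timestamps are made up):
+#   fmt1: 2016-04-28T11:24:01.855530+02:00 node-1 crmd[7281]: ...
+#   fmt2: 2016-04-28T09:24:01.855530Z node-1 ...
+#   fmt3: Apr 28 11:24:01 node-1 crmd[7281]: ...
+#   fmt4: 2016/04/28_11:24:01 ... (no node field)
+#   fmt5: Apr 28 11:24:01.855 node-1 ...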
+
+_syslog_ts_prev = None
+
+
+def syslog_ts(s):
+    """
+    Find the timestamp in the given log line.
+    Returns it as floating-point seconds since the epoch.
+    """
+    global _syslog_ts_prev
+    fmt1, fmt2, fmt3, fmt4, fmt5 = _syslog2node_formats
+
+ # RFC3339
+ m = fmt1.match(s)
+ if m:
+ year, month, day, hour, minute, second, ms, tzsgn, tzh, tzm, _ = m.groups()
+ ts = time.mktime((int(year), int(month), int(day), int(hour), int(minute), int(second), 0, 0, -1))
+ if tzsgn == '+':
+ ts += (3600.0 * float(tzh) + 60.0 * float(tzm))
+ else:
+ ts -= (3600.0 * float(tzh) + 60.0 * float(tzm))
+ if ms:
+ ts += float("0.%s" % ms)
+ _syslog_ts_prev = ts
+ return _syslog_ts_prev
+
+ m = fmt2.match(s)
+ if m:
+ _syslog_ts_prev = utils.parse_to_timestamp(m.group(1))
+ return _syslog_ts_prev
+
+ m = fmt3.match(s)
+ if m:
+ if YEAR is None:
+ set_year()
+ tstr = YEAR + ' ' + m.group(1)
+
+ dt = datetime.datetime.strptime(tstr, '%Y %b %d %H:%M:%S')
+ from dateutil import tz
+ ts = utils.total_seconds(dt - tz.tzlocal().utcoffset(dt) - datetime.datetime(1970, 1, 1))
+ _syslog_ts_prev = ts
+ return _syslog_ts_prev
+
+ m = fmt4.match(s)
+ if m:
+ tstr = m.group(1).replace('_', ' ')
+ _syslog_ts_prev = utils.parse_to_timestamp(tstr)
+ return _syslog_ts_prev
+
+    m = fmt5.match(s)
+ if m:
+ _syslog_ts_prev = utils.parse_to_timestamp(m.group(1))
+ return _syslog_ts_prev
+
+ logger.debug("malformed line: %s", s)
+ return _syslog_ts_prev
+
+
+_syslog_node_prev = None
+
+
+def syslog2node(s):
+ '''
+ Get the node from a syslog line.
+
+ old format:
+ Aug 14 11:07:04 <node> ...
+ new format:
+ Aug 14 11:07:04 [<PID>] <node> ...
+ RFC5424:
+ <TS> <node> ...
+ RFC5424 (2):
+ <TS> [<PID>] <node> ...
+ '''
+ global _syslog_node_prev
+
+ fmt1, fmt2, fmt3, _, _ = _syslog2node_formats
+ m = fmt1.match(s)
+ if m:
+ _syslog_node_prev = m.group(11)
+ return _syslog_node_prev
+
+ m = fmt2.match(s)
+ if m:
+ _syslog_node_prev = m.group(2)
+ return _syslog_node_prev
+
+ m = fmt3.match(s)
+ if m:
+ _syslog_node_prev = m.group(2)
+ return _syslog_node_prev
+
+ try:
+ # strptime defaults year to 1900 (sigh)
+ time.strptime(' '.join(s.split()[0:3]),
+ "%b %d %H:%M:%S")
+ _syslog_node_prev = s.split()[3]
+ return _syslog_node_prev
+ except ValueError: # try the rfc5424
+ ls = s.split()
+ if not ls:
+ return _syslog_node_prev
+ rfc5424 = s.split()[0]
+ if 'T' in rfc5424:
+ try:
+ utils.parse_to_timestamp(rfc5424)
+ _syslog_node_prev = s.split()[1]
+ return _syslog_node_prev
+ except Exception:
+ return _syslog_node_prev
+ else:
+ return _syslog_node_prev
+
+
+def syslog_ts_node(s):
+ """
+ Returns (timestamp, node) from a syslog log line
+ """
+ global _syslog_ts_prev
+ global _syslog_node_prev
+ fmt1, fmt2, fmt3, fmt4, fmt5 = _syslog2node_formats
+
+ # RFC3339
+ m = fmt1.match(s)
+ if m:
+ year, month, day, hour, minute, second, ms, tzsgn, tzh, tzm, node = m.groups()
+ ts = time.mktime((int(year), int(month), int(day), int(hour), int(minute), int(second), 0, 0, -1))
+ if tzsgn == '+':
+ ts += (3600.0 * float(tzh) + 60.0 * float(tzm))
+ else:
+ ts -= (3600.0 * float(tzh) + 60.0 * float(tzm))
+ _syslog_ts_prev = ts
+ _syslog_node_prev = node
+ return _syslog_ts_prev, node
+
+ m = fmt2.match(s)
+ if m:
+ _syslog_ts_prev, _syslog_node_prev = utils.parse_to_timestamp(m.group(1)), m.group(2)
+ return _syslog_ts_prev, _syslog_node_prev
+
+ m = fmt3.match(s)
+ if m:
+ if YEAR is None:
+ set_year()
+ tstr = YEAR + ' ' + m.group(1)
+
+ dt = datetime.datetime.strptime(tstr, '%Y %b %d %H:%M:%S')
+ from dateutil import tz
+ ts = utils.total_seconds(dt - tz.tzlocal().utcoffset(dt) - datetime.datetime(1970, 1, 1))
+ _syslog_ts_prev, _syslog_node_prev = ts, m.group(2)
+ return _syslog_ts_prev, _syslog_node_prev
+
+ m = fmt4.match(s)
+ if m:
+ tstr = m.group(1).replace('_', ' ')
+ _syslog_ts_prev = utils.parse_to_timestamp(tstr)
+ return _syslog_ts_prev, _syslog_node_prev
+
+ m = fmt5.match(s)
+ if m:
+ _syslog_ts_prev, _syslog_node_prev = utils.parse_to_timestamp(m.group(1)), m.group(2)
+ return _syslog_ts_prev, _syslog_node_prev
+
+ logger.debug("malformed line: %s", s)
+ return _syslog_ts_prev, _syslog_node_prev
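+
+
+# A minimal usage sketch (the log line is made up):
+#
+#   >>> ts, node = syslog_ts_node("Apr 28 11:24:01 node-1 crmd[7281]: notice: ...")
+#   >>> node
+#   'node-1'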
diff --git a/crmsh/main.py b/crmsh/main.py
new file mode 100644
index 0000000..e03c07e
--- /dev/null
+++ b/crmsh/main.py
@@ -0,0 +1,385 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+import sys
+import os
+import atexit
+import random
+
+from . import config
+from . import options
+from . import constants
+from . import clidisplay
+from . import term
+from . import upgradeutil
+from . import utils
+from . import userdir
+
+from . import ui_root
+from . import ui_context
+from . import log
+
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+
+
+random.seed()
+
+
+def load_rc(context, rcfile):
+ # only load the RC file if there is no new-style user config
+ if config.has_user_config():
+ return
+
+ try:
+ f = open(rcfile)
+    except OSError:
+ return
+ save_stdin = sys.stdin
+ sys.stdin = f
+ while True:
+ inp = utils.multi_input()
+ if inp is None:
+ break
+ try:
+ if not context.run(inp):
+ raise ValueError("Error in RC file: " + rcfile)
+ except ValueError as e:
+ logger.error(e, exc_info=e)
+ f.close()
+ sys.stdin = save_stdin
+
+
+def exit_handler():
+ '''
+ Write the history file. Remove tmp files.
+ '''
+ if options.interactive and not options.batch:
+ try:
+ from readline import write_history_file
+ write_history_file(userdir.HISTORY_FILE)
+        except Exception:
+ pass
+
+
+# prefer the user set PATH
+def envsetup():
+ path = os.environ["PATH"].split(':')
+ # always add these dirs to PATH if they exist
+ libexec_dirs = ('/usr/lib64', '/usr/libexec', '/usr/lib',
+ '/usr/local/lib64', '/usr/local/libexec', '/usr/local/lib')
+ pacemaker_dirs = set("{}/pacemaker".format(d) for d in libexec_dirs)
+ pacemaker_dirs.add(config.path.crm_daemon_dir)
+ pacemaker_dirs.add(os.path.dirname(sys.argv[0]))
+ for p in pacemaker_dirs:
+ if p not in path and os.path.isdir(p):
+ os.environ['PATH'] = "%s:%s" % (os.environ['PATH'], p)
+
+
+# three modes: interactive (no args supplied), batch (input from
+# a file), half-interactive (args supplied, but not batch)
+def cib_prompt():
+ shadow = utils.get_cib_in_use()
+ if not shadow:
+ return constants.live_cib_prompt
+ if constants.tmp_cib:
+ return constants.tmp_cib_prompt
+ return shadow
+
+
+def make_option_parser():
+ from argparse import ArgumentParser, REMAINDER
+ parser = ArgumentParser(prog='crm', usage="""%(prog)s [-h|--help] [OPTIONS] [SUBCOMMAND ARGS...]
+or %(prog)s help SUBCOMMAND
+
+For a list of available subcommands, use %(prog)s help.
+
+Use %(prog)s without arguments for an interactive session.
+Call a subcommand directly for a "single-shot" use.
+Call %(prog)s with a level name as argument to start an interactive
+session from that level.
+
+See the crm(8) man page or call %(prog)s help for more details.""")
+ parser.add_argument('--version', action='version', version="%(prog)s " + config.CRM_VERSION)
+ parser.add_argument("-f", "--file", dest="filename", metavar="FILE",
+ help="Load commands from the given file. If a dash (-) " +
+ "is used in place of a file name, crm will read commands " +
+ "from the shell standard input (stdin).")
+ parser.add_argument("-c", "--cib", dest="cib", metavar="CIB",
+ help="Start the session using the given shadow CIB file. " +
+ "Equivalent to `cib use <CIB>`.")
+ parser.add_argument("-D", "--display", dest="display", metavar="OUTPUT_TYPE",
+ help="Choose one of the output options: plain, color-always, color, or uppercase. " +
+ "The default is color if the terminal emulation supports colors, " +
+ "else plain.")
+ parser.add_argument("-F", "--force", action="store_true", default=False, dest="force",
+ help="Make crm proceed with applying changes where it would normally " +
+ "ask the user to confirm before proceeding. This option is mainly useful " +
+ "in scripts, and should be used with care.")
+ parser.add_argument("-n", "--no", action="store_true", default=False, dest="ask_no",
+ help="Automatically answer no when prompted")
+ parser.add_argument("-w", "--wait", action="store_true", default=False, dest="wait",
+ help="Make crm wait for the cluster transition to finish " +
+ "(for the changes to take effect) after each processed line.")
+ parser.add_argument("-H", "--history", dest="history", metavar="DIR|FILE|SESSION",
+ help="A directory or file containing a cluster report to load " +
+ "into history, or the name of a previously saved history session.")
+ parser.add_argument("-d", "--debug", action="store_true", default=False, dest="debug",
+ help="Print verbose debugging information.")
+ parser.add_argument("-R", "--regression-tests", action="store_true", default=False,
+ dest="regression_tests",
+ help="Enables extra verbose trace logging used by the regression " +
+ "tests. Logs all external calls made by crmsh.")
+ parser.add_argument("--scriptdir", dest="scriptdir", metavar="DIR",
+ help="Extra directory where crm looks for cluster scripts, or a list " +
+ "of directories separated by semi-colons (e.g. /dir1;/dir2;etc.).")
+ parser.add_argument("-X", dest="profile", metavar="PROFILE",
+ help="Collect profiling data and save in PROFILE.")
+ parser.add_argument("-o", "--opt", action="append", type=str, metavar="OPTION=VALUE",
+ help="Set crmsh option temporarily. If the options are saved using" +
+ "+options save+ then the value passed here will also be saved." +
+ "Multiple options can be set by using +-o+ multiple times.")
+ parser.add_argument("SUBCOMMAND", nargs=REMAINDER)
+ return parser
+
+
+option_parser = make_option_parser()
+
+
+def usage(rc):
+ option_parser.print_usage(file=(sys.stderr if rc != 0 else sys.stdout))
+ sys.exit(rc)
+
+
+def set_interactive():
+ '''Set the interactive option only if we're on a tty.'''
+ if utils.can_ask():
+ options.interactive = True
+
+
+def add_quotes(args):
+ '''
+ Add quotes if there's whitespace in one of the
+ arguments; so that the user doesn't need to protect the
+ quotes.
+
+ If there are two kinds of quotes which actually _survive_
+ the getopt, then we're _probably_ screwed.
+
+ At any rate, stuff like ... '..."..."'
+ as well as '...\'...\'' do work.
+ '''
+ l = []
+ for s in args:
+ if config.core.add_quotes and ' ' in s:
+ q = '"' in s and "'" or '"'
+ if q not in s:
+ s = "%s%s%s" % (q, s, q)
+ l.append(s)
+ return l
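+
+# e.g. (illustrative): with core.add_quotes enabled,
+#   add_quotes(['primitive', 'd0', 'Dummy', 'description=my dummy'])
+# returns ['primitive', 'd0', 'Dummy', '"description=my dummy"'], since only
+# the last argument contains whitespace.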
+
+
+def handle_noninteractive_use(context, user_args):
+ """
+ returns: either a status code of 0 or 1, or
+ None to indicate that nothing was done here.
+ """
+ if options.shadow:
+ if not context.run("cib use " + options.shadow):
+ return 1
+
+ # this special case is silly, but we have to keep it to
+ # preserve the backward compatibility
+ if len(user_args) == 1 and user_args[0].startswith("conf"):
+ if not context.run("configure"):
+ return 1
+ elif len(user_args) > 0:
+ # we're not sure yet whether it's an interactive session or not
+ # (single-shot commands aren't)
+ logger_utils.reset_lineno()
+ options.interactive = False
+
+ l = add_quotes(user_args)
+ if context.run(' '.join(l)):
+ # if the user entered a level, then just continue
+ if not context.previous_level():
+ return 0
+ set_interactive()
+ if options.interactive:
+ logger_utils.reset_lineno(-1)
+ else:
+ return 1
+ return None
+
+
+def render_prompt(context):
+ rendered_prompt = constants.prompt
+ if options.interactive and not options.batch:
+ # TODO: fix how color interacts with readline,
+ # seems the color prompt messes it up
+ promptstr = "crm(%s/%s)%s# " % (cib_prompt(), utils.this_node(), context.prompt())
+ constants.prompt = promptstr
+ if clidisplay.colors_enabled():
+ rendered_prompt = term.render(clidisplay.prompt(promptstr))
+ else:
+ rendered_prompt = promptstr
+ return rendered_prompt
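+
+# e.g. an interactive prompt is rendered as crm(live/node-1)configure#
+# (hostname illustrative)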
+
+
+def setup_context(context):
+ if options.input_file and options.input_file != "-":
+ try:
+ sys.stdin = open(options.input_file)
+ except IOError as msg:
+ logger.error(msg)
+ usage(2)
+
+ if options.interactive and not options.batch:
+ context.setup_readline()
+
+
+def main_input_loop(context, user_args):
+ """
+ Main input loop for crmsh. Parses input
+ line by line.
+ """
+ rc = handle_noninteractive_use(context, user_args)
+ if rc is not None:
+ return rc
+
+ setup_context(context)
+
+ rc = 0
+ while True:
+ try:
+ inp = utils.multi_input(render_prompt(context))
+ if inp is None:
+ if options.interactive:
+ rc = 0
+ context.quit(rc)
+ try:
+ if not context.run(inp):
+ rc = 1
+ except ValueError as e:
+ rc = 1
+ logger.error(e, exc_info=e)
+ except KeyboardInterrupt:
+ if options.interactive and not options.batch:
+ print("Ctrl-C, leaving")
+ context.quit(1)
+ except Exception as e:
+ logger.error(e, exc_info=e)
+ context.quit(1)
+
+
+def compgen():
+ args = sys.argv[2:]
+ if len(args) < 2:
+ return
+
+ options.shell_completion = True
+
+ # point = int(args[0])
+ line = args[1]
+
+ # remove [*]crm from commandline
+ idx = line.find('crm')
+ if idx >= 0:
+ line = line[idx+3:].lstrip()
+
+ options.interactive = False
+ ui = ui_root.Root()
+ context = ui_context.Context(ui)
+ last_word = line.rsplit(' ', 1)
+ if len(last_word) > 1 and ':' in last_word[1]:
+ idx = last_word[1].rfind(':')
+ for w in context.complete(line):
+ print(w[idx+1:])
+ else:
+ for w in context.complete(line):
+ print(w)
+
+
+def parse_options():
+ opts, args = option_parser.parse_known_args()
+ utils.check_empty_option_value(opts)
+ config.core.debug = "yes" if opts.debug else config.core.debug
+ options.profile = opts.profile or options.profile
+ options.regression_tests = opts.regression_tests or options.regression_tests
+ config.color.style = opts.display or config.color.style
+ config.core.force = opts.force or config.core.force
+ if opts.filename:
+ logger_utils.reset_lineno()
+ options.input_file, options.batch, options.interactive = opts.filename, True, False
+ options.history = opts.history or options.history
+ config.core.wait = opts.wait or config.core.wait
+ options.shadow = opts.cib or options.shadow
+ options.scriptdir = opts.scriptdir or options.scriptdir
+ options.ask_no = opts.ask_no
+ for opt in opts.opt or []:
+ try:
+            k, v = opt.split('=', 1)
+ s, n = k.split('.')
+ config.set_option(s, n, v)
+ except ValueError as e:
+ raise ValueError("Expected -o <section>.<name>=<value>: %s" % (e))
+ return opts.SUBCOMMAND
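+
+# e.g. (illustrative) `crm -o core.editor=vim configure` sets
+# config.core.editor for this session only; -o may be given multiple times.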
+
+
+def profile_run(context, user_args):
+ import cProfile
+ cProfile.runctx('main_input_loop(context, user_args)',
+ globals(),
+ {'context': context, 'user_args': user_args},
+ filename=options.profile)
+ # print how to use the profile file, but don't disturb
+ # the regression tests
+ if not options.regression_tests:
+ stats_cmd = "; ".join(['import pstats',
+ 's = pstats.Stats("%s")' % options.profile,
+ 's.sort_stats("cumulative").print_stats()'])
+ print("python -c '%s' | less" % (stats_cmd))
+ return 0
+
+
+def run():
+ try:
+ if len(sys.argv) >= 2 and sys.argv[1] == '--compgen':
+ compgen()
+ return 0
+ envsetup()
+ userdir.mv_user_files()
+
+ ui = ui_root.Root()
+ context = ui_context.Context(ui)
+
+ load_rc(context, userdir.RC_FILE)
+ atexit.register(exit_handler)
+ options.interactive = utils.can_ask()
+ if not options.interactive:
+ logger_utils.reset_lineno()
+ options.batch = True
+ user_args = parse_options()
+ if config.core.debug:
+ logger.debug(utils.debug_timestamp())
+ term.init()
+ if options.profile:
+ return profile_run(context, user_args)
+ else:
+ upgradeutil.upgrade_if_needed()
+ return main_input_loop(context, user_args)
+ except KeyboardInterrupt:
+ if config.core.debug:
+ raise
+ else:
+ print("Ctrl-C, leaving")
+ sys.exit(1)
+ except ValueError as e:
+ logger.error(e, exc_info=e)
+ sys.exit(1)
+ except Exception as e:
+ logger.error(e, exc_info=e)
+ raise
+
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/minieval.py b/crmsh/minieval.py
new file mode 100644
index 0000000..06e780c
--- /dev/null
+++ b/crmsh/minieval.py
@@ -0,0 +1,370 @@
+# Copyright (C) 2013-2017 Daniel Fairhead
+# Copyright (C) 2017 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+"""
+Based on simpleeval:
+
+SimpleEval - (C) 2013-2017 Daniel Fairhead
+-------------------------------------
+
+A short, easy-to-use, safe and reasonably extensible expression evaluator.
+Designed for things like in a website where you want to allow the user to
+generate a string, or a number from some other input, without allowing full
+eval() or other unsafe or needlessly complex linguistics.
+
+-------------------------------------
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+-------------------------------------
+
+Initial idea copied from J.F. Sebastian on Stack Overflow
+( http://stackoverflow.com/a/9558001/1973500 ) with
+modifications and many improvements.
+
+-------------------------------------
+Contributors:
+- corro (Robin Baumgartner) (py3k)
+- dratchkov (David R) (nested dicts)
+- marky1991 (Mark Young) (slicing)
+- T045T (Nils Berg) (!=, py3kstr, obj.
+- perkinslr (Logan Perkins) (.__globals__ or .func_ breakouts)
+- impala2 (Kirill Stepanov) (massive _eval refactor)
+- gk (ugik) (Other iterables than str can DOS too, and can be made)
+- daveisfera (Dave Johansen) 'not' Boolean op, Pycharm and pep8 fixes.
+
+-------------------------------------
+Usage:
+
+This trimmed-down variant keeps only comparisons, boolean logic,
+subscripting, attribute access and conditional expressions; the
+arithmetic operators and user-defined functions of upstream simpleeval
+have been removed. The module-level entry point is minieval():
+
+>>> minieval("a > 3", {"a": 5})
+True
+
+>>> minieval("state if ready else 'unknown'", {"state": "ok", "ready": True})
+'ok'
+
+For the full-featured evaluator, see the simpleeval package on PyPI or
+its GitHub repository.
+
+"""
+
+import ast
+import sys
+import operator as op
+
+########################################
+# Module wide 'globals'
+
+MAX_STRING_LENGTH = 100000
+DISALLOW_PREFIXES = ['_', 'func_']
+DISALLOW_METHODS = ['format']
+
+PYTHON3 = sys.version_info[0] == 3
+
+########################################
+# Exceptions:
+
+
+class InvalidExpression(Exception):
+ """ Generic Exception """
+ pass
+
+
+class NameNotDefined(InvalidExpression):
+ """ a name isn't defined. """
+ def __init__(self, name, expression):
+ self.name = name
+ self.message = "'{0}' is not defined for expression '{1}'".format(
+ name, expression)
+ self.expression = expression
+
+ # pylint: disable=bad-super-call
+ super(InvalidExpression, self).__init__(self.message)
+
+
+class AttributeDoesNotExist(InvalidExpression):
+ """attribute does not exist"""
+ def __init__(self, attr, expression):
+ self.message = \
+ "Attribute '{0}' does not exist in expression '{1}'".format(
+ attr, expression)
+ self.attr = attr
+ self.expression = expression
+
+
+class FeatureNotAvailable(InvalidExpression):
+ """ What you're trying to do is not allowed. """
+ pass
+
+
+class IterableTooLong(InvalidExpression):
+ """ That iterable is **way** too long, baby. """
+ pass
+
+
+########################################
+# Defaults for the evaluator:
+
+DEFAULT_OPERATORS = {ast.Eq: op.eq, ast.NotEq: op.ne,
+ ast.Gt: op.gt, ast.Lt: op.lt,
+ ast.GtE: op.ge, ast.LtE: op.le,
+ ast.Not: op.not_,
+ ast.USub: op.neg, ast.UAdd: op.pos,
+ ast.In: lambda x, y: op.contains(y, x),
+ ast.NotIn: lambda x, y: not op.contains(y, x),
+ ast.Is: lambda x, y: x is y,
+ ast.IsNot: lambda x, y: x is not y,
+ }
+
+DEFAULT_NAMES = {"True": True, "False": False}
+
+########################################
+# And the actual evaluator:
+
+
+class SimpleEval(object): # pylint: disable=too-few-public-methods
+ """ A very simple expression parser.
+    >>> s = SimpleEval({"x": 50})
+    >>> s.evaluate("x > 20")
+    True
+ """
+ expr = ""
+
+ def __init__(self, names):
+ """
+ Create the evaluator instance. Set up valid operators (+,-, etc)
+ functions (add, random, get_val, whatever) and names. """
+
+ operators = DEFAULT_OPERATORS
+ names = names.copy()
+ names.update(DEFAULT_NAMES)
+
+ self.operators = operators
+ self.names = names
+
+ self.nodes = {
+ ast.Num: self._eval_num,
+ ast.Str: self._eval_str,
+ ast.Name: self._eval_name,
+ ast.UnaryOp: self._eval_unaryop,
+ ast.BinOp: self._eval_binop,
+ ast.BoolOp: self._eval_boolop,
+ ast.Compare: self._eval_compare,
+ ast.IfExp: self._eval_ifexp,
+ ast.keyword: self._eval_keyword,
+ ast.Subscript: self._eval_subscript,
+ ast.Attribute: self._eval_attribute,
+ ast.Index: self._eval_index,
+ ast.Slice: self._eval_slice,
+ }
+
+ # py3k stuff:
+ if hasattr(ast, 'NameConstant'):
+ self.nodes[ast.NameConstant] = self._eval_constant
+ elif isinstance(self.names, dict) and "None" not in self.names:
+ self.names["None"] = None
+
+ # py3.8 uses ast.Constant instead of ast.Num, ast.Str, ast.NameConstant
+ if hasattr(ast, 'Constant'):
+ self.nodes[ast.Constant] = self._eval_constant
+
+ def evaluate(self, expr):
+ """ evaluate an expresssion, using the operators, functions and
+ names previously set up. """
+
+ # set a copy of the expression aside, so we can give nice errors...
+
+ self.expr = expr
+
+ # and evaluate:
+ return self._eval(ast.parse(expr.strip()).body[0].value)
+
+ def _eval(self, node):
+ """ The internal evaluator used on each node in the parsed tree. """
+
+ try:
+ handler = self.nodes[type(node)]
+ except KeyError:
+ raise FeatureNotAvailable("Sorry, {0} is not available in this "
+ "evaluator".format(type(node).__name__))
+
+ return handler(node)
+
+ @staticmethod
+ def _eval_num(node):
+ return node.n
+
+ @staticmethod
+ def _eval_str(node):
+ if len(node.s) > MAX_STRING_LENGTH:
+ raise IterableTooLong("String Literal in statement is too long!"
+ " ({0}, when {1} is max)".format(
+ len(node.s), MAX_STRING_LENGTH))
+ return node.s
+
+ @staticmethod
+ def _eval_constant(node):
+ if (hasattr(node.value, '__len__') and
+ len(node.value) > MAX_STRING_LENGTH):
+ raise IterableTooLong("Literal in statement is too long!"
+ " ({0}, when {1} is max)"
+ "".format(len(node.value),
+ MAX_STRING_LENGTH))
+ return node.value
+
+ def _eval_unaryop(self, node):
+ return self.operators[type(node.op)](self._eval(node.operand))
+
+ def _eval_binop(self, node):
+ return self.operators[type(node.op)](self._eval(node.left),
+ self._eval(node.right))
+
+ def _eval_boolop(self, node):
+ if isinstance(node.op, ast.And):
+ vout = False
+ for value in node.values:
+ vout = self._eval(value)
+ if not vout:
+ return False
+ return vout
+ elif isinstance(node.op, ast.Or):
+ for value in node.values:
+ vout = self._eval(value)
+ if vout:
+ return vout
+ return False
+
+ def _eval_compare(self, node):
+ left = self._eval(node.left)
+ for operation, comp in zip(node.ops, node.comparators):
+ right = self._eval(comp)
+ if self.operators[type(operation)](left, right):
+ left = right # Hi Dr. Seuss...
+ else:
+ return False
+ return True
+
+ def _eval_ifexp(self, node):
+ return self._eval(node.body) if self._eval(node.test) \
+ else self._eval(node.orelse)
+
+ def _eval_keyword(self, node):
+ return node.arg, self._eval(node.value)
+
+ def _eval_name(self, node):
+ try:
+ # This happens at least for slicing
+ # This is a safe thing to do because it is impossible
+            # that there is a true expression assigning to none
+ # (the compiler rejects it, so you can't even
+ # pass that to ast.parse)
+ if isinstance(self.names, dict):
+ return self.names[node.id]
+ elif callable(self.names):
+ return self.names(node)
+ else:
+ raise InvalidExpression('Trying to use name (variable) "{0}"'
+ ' when no "names" defined for'
+ ' evaluator'.format(node.id))
+
+        except KeyError:
+            # upstream simpleeval falls back to a functions table here; this
+            # trimmed evaluator defines no self.functions, so just report the
+            # undefined name
+            raise NameNotDefined(node.id, self.expr)
+
+    def _eval_subscript(self, node):
+        container = self._eval(node.value)
+        key = self._eval(node.slice)
+        return container[key]
+
+ def _eval_attribute(self, node):
+ for prefix in DISALLOW_PREFIXES:
+ if node.attr.startswith(prefix):
+ raise FeatureNotAvailable(
+ "Sorry, access to __attributes "
+ " or func_ attributes is not available. "
+ "({0})".format(node.attr))
+ if node.attr in DISALLOW_METHODS:
+ raise FeatureNotAvailable(
+ "Sorry, this method is not available. "
+ "({0})".format(node.attr))
+
+ try:
+ return self._eval(node.value)[node.attr]
+ except (KeyError, TypeError):
+ pass
+
+ # Maybe the base object is an actual object, not just a dict
+ try:
+ return getattr(self._eval(node.value), node.attr)
+ except (AttributeError, TypeError):
+ pass
+
+ # If it is neither, raise an exception
+ raise AttributeDoesNotExist(node.attr, self.expr)
+
+ def _eval_index(self, node):
+ return self._eval(node.value)
+
+ def _eval_slice(self, node):
+ lower = upper = step = None
+ if node.lower is not None:
+ lower = self._eval(node.lower)
+ if node.upper is not None:
+ upper = self._eval(node.upper)
+ if node.step is not None:
+ step = self._eval(node.step)
+ return slice(lower, upper, step)
+
+
+def minieval(expr, env):
+ """
+ Given a dict of variable -> value mapping in env,
+ parse and evaluate the expression in expr
+ """
+ return SimpleEval(env).evaluate(expr)
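+
+
+# A minimal usage sketch (names are made up):
+#
+#   >>> minieval("score > 10 and color in allowed",
+#   ...          {"score": 42, "color": "red", "allowed": ["red", "blue"]})
+#   True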
diff --git a/crmsh/ocfs2.py b/crmsh/ocfs2.py
new file mode 100644
index 0000000..346cc5c
--- /dev/null
+++ b/crmsh/ocfs2.py
@@ -0,0 +1,346 @@
+import re
+from contextlib import contextmanager
+from . import utils, sh
+from . import bootstrap
+from . import ra
+from . import corosync
+from . import log
+from . import xmlutil
+from . import constants
+from .service_manager import ServiceManager
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+
+
+class OCFS2Manager(object):
+ """
+ Class to manage OCFS2 and configure related resources
+ """
+ RA_ID_PREFIX = "ocfs2-"
+ DLM_RA_ID = "{}dlm".format(RA_ID_PREFIX)
+ FS_RA_ID = "{}clusterfs".format(RA_ID_PREFIX)
+ LVMLOCKD_RA_ID = "{}lvmlockd".format(RA_ID_PREFIX)
+ LVMACTIVATE_RA_ID = "{}lvmactivate".format(RA_ID_PREFIX)
+ GROUP_ID = "{}group".format(RA_ID_PREFIX)
+ CLONE_ID = "{}clone".format(RA_ID_PREFIX)
+ VG_ID = "{}vg".format(RA_ID_PREFIX)
+ LV_ID = "{}lv".format(RA_ID_PREFIX)
+
+ MAX_CLONE_NUM = 8
+ # Note: using undocumented '-x' switch to avoid prompting if overwriting
+ MKFS_CMD = "mkfs.ocfs2 --cluster-stack pcmk --cluster-name {} -N {} -x {}"
+ HINTS_WHEN_RUNNING = """
+The cluster service has already been initialized, but the prerequisites are missing
+to configure OCFS2. Please fix it and use the stage procedure to configure OCFS2 separately,
+e.g. crm cluster init ocfs2 -o <ocfs2_device>
+ """
+
+ def __init__(self, context):
+ """
+ Init function
+ """
+ self.ocfs2_devices = context.ocfs2_devices
+ self.use_cluster_lvm2 = context.use_cluster_lvm2
+ self.mount_point = context.mount_point
+ self.use_stage = context.stage == "ocfs2"
+ self.yes_to_all = context.yes_to_all
+ self.cluster_name = None
+ self.exist_ra_id_list = []
+ self.vg_id = None
+ self.group_id = None
+ self.target_device = None
+
+ def _verify_packages(self, use_cluster_lvm2=False):
+ """
+ Find if missing required package
+ """
+ required_packages = ["ocfs2-tools"]
+ if use_cluster_lvm2:
+ required_packages.append("lvm2-lockd")
+ for pkg in required_packages:
+ if not utils.package_is_installed(pkg):
+ raise ValueError("Missing required package for configuring OCFS2: {}".format(pkg))
+
+ def _verify_options(self):
+ """
+ Verify options related with OCFS2
+ """
+        if self.use_stage and not self.ocfs2_devices:
+            raise ValueError("ocfs2 stage requires the -o option")
+        if len(self.ocfs2_devices) > 1 and not self.use_cluster_lvm2:
+            raise ValueError("Without Cluster LVM2 (-C option), the -o option only supports one device")
+        if self.use_cluster_lvm2 and not self.ocfs2_devices:
+            raise ValueError("The -C option is only valid together with the -o option")
+ if self.mount_point and utils.has_mount_point_used(self.mount_point):
+ raise ValueError("Mount point {} already mounted".format(self.mount_point))
+
+ def _verify_devices(self):
+ """
+ Verify ocfs2 devices
+ """
+ for dev in self.ocfs2_devices:
+ if not utils.is_block_device(dev):
+ raise ValueError("{} doesn't look like a block device".format(dev))
+ if utils.is_dev_used_for_lvm(dev) and self.use_cluster_lvm2:
+ raise ValueError("{} is a Logical Volume, cannot be used with the -C option".format(dev))
+ if utils.has_disk_mounted(dev):
+ raise ValueError("{} already mounted".format(dev))
+
+ def _check_if_already_configured(self):
+ """
+ Check if ocfs2 related resource already configured
+ """
+ if not self.use_stage:
+ return
+ out = sh.cluster_shell().get_stdout_or_raise_error("crm configure show")
+ if "fstype=ocfs2" in out:
+ logger.info("Already configured OCFS2 related resources")
+ raise utils.TerminateSubCommand
+
+ def _static_verify(self):
+ """
+ Verify before configuring on init process
+ """
+ self._verify_packages(self.use_cluster_lvm2)
+ self._check_if_already_configured()
+ self._verify_options()
+ self._verify_devices()
+
+ def _dynamic_raise_error(self, error_msg):
+ """
+ Customize error message after cluster running
+ """
+ raise ValueError(error_msg + ("" if self.use_stage else self.HINTS_WHEN_RUNNING))
+
+ def _check_sbd_and_ocfs2_dev(self):
+ """
+ Raise error when ocfs2 device is the same with sbd device
+ """
+ from . import sbd
+ if ServiceManager().service_is_enabled("sbd.service"):
+ sbd_device_list = sbd.SBDManager.get_sbd_device_from_config()
+ for dev in self.ocfs2_devices:
+ if dev in sbd_device_list:
+ self._dynamic_raise_error("{} cannot be the same with SBD device".format(dev))
+
+ def _confirm_to_overwrite_ocfs2_dev(self):
+ """
+        Confirm whether to overwrite each ocfs2 device in interactive mode
+ """
+ for dev in self.ocfs2_devices:
+ msg = ""
+ if utils.has_dev_partitioned(dev):
+ msg = "Found a partition table in {}".format(dev)
+ else:
+ fs_type = utils.get_dev_fs_type(dev)
+ if fs_type:
+ msg = "{} contains a {} file system".format(dev, fs_type)
+ if msg and not bootstrap.confirm("{} - Proceed anyway?".format(msg)):
+ raise utils.TerminateSubCommand
+
+ for dev in self.ocfs2_devices:
+ sh.cluster_shell().get_stdout_or_raise_error("wipefs -a {}".format(dev))
+
+ def _dynamic_verify(self):
+ """
+ Verify after cluster running
+ """
+ if not utils.has_stonith_running():
+ self._dynamic_raise_error("OCFS2 requires stonith device configured and running")
+
+ self._check_sbd_and_ocfs2_dev()
+ self._confirm_to_overwrite_ocfs2_dev()
+
+ def _gen_ra_scripts(self, ra_type, kv):
+ """
+ Generate ra scripts
+ Return id and scripts
+ """
+ config_scripts = ""
+ kv["id"] = utils.gen_unused_id(self.exist_ra_id_list, kv["id"])
+ config_scripts = ra.CONFIGURE_RA_TEMPLATE_DICT[ra_type].format(**kv)
+ return kv["id"], config_scripts
+
+ def _mkfs(self, target):
+ """
+ Creating OCFS2 filesystem for the target device
+ """
+ with logger_utils.status_long(" Creating OCFS2 filesystem for {}".format(target)):
+ self.cluster_name = corosync.get_value('totem.cluster_name')
+ sh.cluster_shell().get_stdout_or_raise_error(self.MKFS_CMD.format(self.cluster_name, self.MAX_CLONE_NUM, target))
+
+ @contextmanager
+ def _vg_change(self):
+ """
+ vgchange process using contextmanager
+ """
+ shell = sh.cluster_shell()
+ shell.get_stdout_or_raise_error("vgchange -ay {}".format(self.vg_id))
+ try:
+ yield
+ finally:
+ shell.get_stdout_or_raise_error("vgchange -an {}".format(self.vg_id))
+
+ def _create_lv(self):
+ """
+ Create PV, VG, LV and return LV path
+ """
+ disks_string = ' '.join(self.ocfs2_devices)
+ shell = sh.cluster_shell()
+
+ # Create PV
+ with logger_utils.status_long(" Creating PV for {}".format(disks_string)):
+ shell.get_stdout_or_raise_error("pvcreate {} -y".format(disks_string))
+
+ # Create VG
+ self.vg_id = utils.gen_unused_id(utils.get_all_vg_name(), self.VG_ID)
+ with logger_utils.status_long(" Creating VG {}".format(self.vg_id)):
+ shell.get_stdout_or_raise_error("vgcreate --shared {} {} -y".format(self.vg_id, disks_string))
+
+ # Create LV
+ with logger_utils.status_long(" Creating LV {} on VG {}".format(self.LV_ID, self.vg_id)):
+ pe_number = utils.get_pe_number(self.vg_id)
+ shell.get_stdout_or_raise_error("lvcreate -l {} {} -n {} -y".format(pe_number, self.vg_id, self.LV_ID))
+
+ return "/dev/{}/{}".format(self.vg_id, self.LV_ID)
+
+ def _gen_group_and_clone_scripts(self, ra_list):
+ """
+ Generate group and clone scripts
+ """
+ # Group
+ group_kv = {"id":self.GROUP_ID, "ra_string":' '.join(ra_list)}
+ self.group_id, group_scripts = self._gen_ra_scripts("GROUP", group_kv)
+ # Clone
+ clone_kv = {"id":self.CLONE_ID, "group_id":self.group_id}
+ _, clone_scripts = self._gen_ra_scripts("CLONE", clone_kv)
+ return group_scripts + clone_scripts
+
+ def _gen_fs_scripts(self):
+ """
+ Generate Filesystem scripts
+ """
+ fs_kv = {
+ "id": self.FS_RA_ID,
+ "mnt_point": self.mount_point,
+ "fs_type": "ocfs2",
+ "device": self.target_device
+ }
+ return self._gen_ra_scripts("Filesystem", fs_kv)
+
+ def _load_append_and_wait(self, scripts, res_id, msg, need_append=True):
+ """
+ Load scripts, append to exist group and wait resource started
+ """
+ bootstrap.crm_configure_load("update", scripts)
+ if need_append:
+ utils.append_res_to_group(self.group_id, res_id)
+ bootstrap.wait_for_resource(msg, res_id)
+
+ def _config_dlm(self):
+ """
+ Configure DLM resource
+ """
+ config_scripts = ""
+ dlm_id, dlm_scripts = self._gen_ra_scripts("DLM", {"id":self.DLM_RA_ID})
+ group_clone_scripts = self._gen_group_and_clone_scripts([dlm_id])
+ config_scripts = dlm_scripts + group_clone_scripts
+ self._load_append_and_wait(config_scripts, dlm_id, " Wait for DLM({}) start".format(dlm_id), need_append=False)
+
+ def _config_lvmlockd(self):
+ """
+ Configure LVMLockd resource
+ """
+ _id, _scripts = self._gen_ra_scripts("LVMLockd", {"id":self.LVMLOCKD_RA_ID})
+ self._load_append_and_wait(_scripts, _id, " Wait for LVMLockd({}) start".format(_id))
+
+ def _config_lvmactivate(self):
+ """
+ Configure LVMActivate resource
+ """
+ _id, _scripts = self._gen_ra_scripts("LVMActivate", {"id": self.LVMACTIVATE_RA_ID, "vgname": self.vg_id})
+ self._load_append_and_wait(_scripts, _id, " Wait for LVMActivate({}) start".format(_id))
+
+ def _config_fs(self):
+ """
+ Configure Filesystem resource
+ """
+ utils.mkdirp(self.mount_point)
+ _id, _scripts = self._gen_fs_scripts()
+ self._load_append_and_wait(_scripts, _id, " Wait for Filesystem({}) start".format(_id))
+
+ def _config_resource_stack_lvm2(self):
+ """
+ Configure dlm + lvmlockd + lvm-activate + Filesystem
+ """
+ self._config_dlm()
+ self._config_lvmlockd()
+ self.target_device = self._create_lv()
+ with self._vg_change():
+ self._mkfs(self.target_device)
+ self._config_lvmactivate()
+ self._config_fs()
+
+ def _config_resource_stack_ocfs2_along(self):
+ """
+ Configure dlm + Filesystem
+ """
+ self._config_dlm()
+ self.target_device = self.ocfs2_devices[0]
+ self._mkfs(self.target_device)
+ self._config_fs()
+
+ def init_ocfs2(self):
+ """
+ OCFS2 configure process on init node
+ """
+ logger.info("Configuring OCFS2")
+ self._dynamic_verify()
+ self.exist_ra_id_list = utils.all_exist_id()
+
+ no_quorum_policy_value = utils.get_property("no-quorum-policy")
+ if not no_quorum_policy_value or no_quorum_policy_value != "freeze":
+ utils.set_property("no-quorum-policy", "freeze")
+ logger.info(" 'no-quorum-policy' is changed to \"freeze\"")
+
+ if self.use_cluster_lvm2:
+ self._config_resource_stack_lvm2()
+ else:
+ self._config_resource_stack_ocfs2_along()
+ logger.info(" OCFS2 device %s mounted on %s", self.target_device, self.mount_point)
+
+ def _find_target_on_join(self, peer):
+ """
+ Find device name from OCF Filesystem param on peer node
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("crm configure show", peer)
+ for line in out.splitlines():
+ if "fstype=ocfs2" in line:
+                res = re.search(r'device="(.*?)"', line)
+ if res:
+ return res.group(1)
+ else:
+ raise ValueError("Filesystem require configure device")
+ return None
+
+ def join_ocfs2(self, peer):
+ """
+ Called on join process, to verify ocfs2 environment
+ """
+ target = self._find_target_on_join(peer)
+ if not target:
+ return
+ with logger_utils.status_long("Verify OCFS2 environment"):
+ use_cluster_lvm2 = xmlutil.CrmMonXmlParser(peer).is_resource_configured(constants.LVMLOCKD_RA)
+ self._verify_packages(use_cluster_lvm2)
+ if utils.is_dev_a_plain_raw_disk_or_partition(target, peer):
+ utils.compare_uuid_with_peer_dev([target], peer)
+
+ @classmethod
+ def verify_ocfs2(cls, ctx):
+ """
+ Verify OCFS2 related packages and environment
+ """
+ inst = cls(ctx)
+ inst._static_verify()
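+
+
+# The resulting resource stacks (ids are the defaults defined above):
+#   without -C: ocfs2-dlm -> ocfs2-clusterfs, grouped as ocfs2-group and cloned
+#   with -C:    ocfs2-dlm -> ocfs2-lvmlockd -> ocfs2-lvmactivate -> ocfs2-clusterfs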
diff --git a/crmsh/options.py b/crmsh/options.py
new file mode 100644
index 0000000..4c6509d
--- /dev/null
+++ b/crmsh/options.py
@@ -0,0 +1,18 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+'''
+Session-only options (not saved).
+'''
+
+interactive = False
+batch = False
+ask_no = False
+regression_tests = False
+profile = ""
+history = "live"
+input_file = ""
+shadow = ""
+scriptdir = ""
+# set to true when completing non-interactively
+shell_completion = False
diff --git a/crmsh/ordereddict.py b/crmsh/ordereddict.py
new file mode 100644
index 0000000..90ef8a1
--- /dev/null
+++ b/crmsh/ordereddict.py
@@ -0,0 +1,133 @@
+# Copyright (c) 2009 Raymond Hettinger
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation files
+# (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge,
+# publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from UserDict import DictMixin
+
+ class OrderedDict(dict, DictMixin):
+
+ def __init__(self, *args, **kwds):
+ if len(args) > 1:
+ raise TypeError('expected at most 1 arguments, got %d' % len(args))
+ try:
+ self.__end
+ except AttributeError:
+ self.clear()
+ self.update(*args, **kwds)
+
+ def clear(self):
+ self.__end = end = []
+ end += [None, end, end] # sentinel node for doubly linked list
+ self.__map = {} # key --> [key, prev, next]
+ dict.clear(self)
+
+ def __setitem__(self, key, value):
+ if key not in self:
+ end = self.__end
+ curr = end[1]
+ curr[2] = end[1] = self.__map[key] = [key, curr, end]
+ dict.__setitem__(self, key, value)
+
+ def __delitem__(self, key):
+ dict.__delitem__(self, key)
+ key, prev, next_ = self.__map.pop(key)
+ prev[2] = next_
+ next_[1] = prev
+
+ def __iter__(self):
+ end = self.__end
+ curr = end[2]
+ while curr is not end:
+ yield curr[0]
+ curr = curr[2]
+
+ def __reversed__(self):
+ end = self.__end
+ curr = end[1]
+ while curr is not end:
+ yield curr[0]
+ curr = curr[1]
+
+ def popitem(self, last=True):
+ if not self:
+ raise KeyError('dictionary is empty')
+ if last:
+ key = next(reversed(self))
+ else:
+ key = next(iter(self))
+ value = self.pop(key)
+ return key, value
+
+ def __reduce__(self):
+ items = [[k, self[k]] for k in self]
+ tmp = self.__map, self.__end
+ del self.__map, self.__end
+ inst_dict = vars(self).copy()
+ self.__map, self.__end = tmp
+ if inst_dict:
+ return (self.__class__, (items,), inst_dict)
+ return self.__class__, (items,)
+
+ def keys(self):
+ return list(self)
+
+ setdefault = DictMixin.setdefault
+ update = DictMixin.update
+ pop = DictMixin.pop
+ values = DictMixin.values
+ items = DictMixin.items
+ iterkeys = DictMixin.iterkeys
+ itervalues = DictMixin.itervalues
+ iteritems = DictMixin.iteritems
+
+ def __repr__(self):
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, list(self.items()))
+
+ def copy(self):
+ return self.__class__(self)
+
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+
+ def __eq__(self, other):
+ if isinstance(other, OrderedDict):
+ if len(self) != len(other):
+ return False
+ for p, q in zip(list(self.items()), list(other.items())):
+ if p != q:
+ return False
+ return True
+ return dict.__eq__(self, other)
+
+ def __ne__(self, other):
+ return not self == other
+
+
+odict = OrderedDict
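+
+# e.g. odict([('b', 1), ('a', 2)]) preserves insertion order:
+#   list(odict([('b', 1), ('a', 2)])) == ['b', 'a']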
diff --git a/crmsh/orderedset.py b/crmsh/orderedset.py
new file mode 100644
index 0000000..2723328
--- /dev/null
+++ b/crmsh/orderedset.py
@@ -0,0 +1,102 @@
+# Copyright (C) 2009 Raymond Hettinger
+
+# *** MIT License ***
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+# of the Software, and to permit persons to whom the Software is furnished to do
+# so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# {{{ http://code.activestate.com/recipes/576694/ (r7)
+
+import collections
+
+KEY, PREV, NEXT = list(range(3))
+
+
+class OrderedSet(collections.abc.MutableSet):
+
+ def __init__(self, iterable=None):
+ self.end = end = []
+ end += [None, end, end] # sentinel node for doubly linked list
+ self.map = {} # key --> [key, prev, next]
+ if iterable is not None:
+ self |= iterable
+
+ def __len__(self):
+ return len(self.map)
+
+ def __contains__(self, key):
+ return key in self.map
+
+ def add(self, key):
+ if key not in self.map:
+ end = self.end
+ curr = end[PREV]
+ curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]
+
+ def discard(self, key):
+ if key in self.map:
+ key, prev, next_ = self.map.pop(key)
+ prev[NEXT] = next_
+ next_[PREV] = prev
+
+ def __iter__(self):
+ end = self.end
+ curr = end[NEXT]
+ while curr is not end:
+ yield curr[KEY]
+ curr = curr[NEXT]
+
+ def __reversed__(self):
+ end = self.end
+ curr = end[PREV]
+ while curr is not end:
+ yield curr[KEY]
+ curr = curr[PREV]
+
+    def pop(self):
+        if not self:
+            raise KeyError('set is empty')
+        key = next(reversed(self))
+        self.discard(key)
+        return key
+
+ def intersection(self, other):
+ # return set with all objects in self that are in other
+ return OrderedSet([x for x in self if x in other])
+
+ def __repr__(self):
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, list(self))
+
+ def __eq__(self, other):
+ if isinstance(other, OrderedSet):
+ return len(self) == len(other) and list(self) == list(other)
+ return set(self) == set(other)
+
+ def __del__(self):
+ self.clear() # remove circular references
+
+
+oset = OrderedSet
+
+if __name__ == '__main__':
+ print(OrderedSet('abracadaba'))
+ print(OrderedSet('simsalabim'))
+
+# end of http://code.activestate.com/recipes/576694/ }}}
diff --git a/crmsh/pacemaker.py b/crmsh/pacemaker.py
new file mode 100644
index 0000000..e688189
--- /dev/null
+++ b/crmsh/pacemaker.py
@@ -0,0 +1,368 @@
+# Copyright (C) 2009 Yan Gao <ygao@novell.com>
+# See COPYING for license information.
+
+import os
+import tempfile
+import copy
+from lxml import etree
+
+
+class PacemakerError(Exception):
+ '''PacemakerError exceptions'''
+
+
+def get_validate_name(cib_elem):
+ if cib_elem is not None:
+ return cib_elem.get("validate-with")
+ else:
+ return None
+
+
+def get_validate_type(cib_elem):
+ return "rng"
+
+
+def get_schema_filename(validate_name):
+ if not validate_name.endswith('.rng'):
+ return "%s.rng" % (validate_name)
+ return validate_name
+
+
+def read_schema_local(validate_name, file_path):
+ try:
+ with open(file_path) as f:
+ return f.read()
+ except IOError as msg:
+ raise PacemakerError("Cannot read schema file '%s': %s" % (file_path, msg))
+
+
+def delete_dir(dir_path):
+ real_path = os.path.realpath(dir_path)
+ if real_path.count(os.sep) == len(real_path):
+ raise PacemakerError("Do not delete the root directory")
+
+ for root, dirs, files in os.walk(dir_path, False):
+ for name in files:
+ try:
+ os.unlink(os.path.join(root, name))
+ except OSError:
+ continue
+ for name in dirs:
+ try:
+ os.rmdir(os.path.join(root, name))
+ except OSError:
+ continue
+
+ os.rmdir(dir_path)
+
+
+def subset_select(sub_set, optional):
+ "Helper used to select attributes/elements based on subset and optional flag"
+ if sub_set == 'r': # required
+ return not optional
+ if sub_set == 'o': # optional
+ return optional
+ return True
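+
+# e.g. subset_select('r', False) is True: a non-optional (required)
+# attribute is kept when only required ones are requested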
+
+
+def CrmSchema(cib_elem, local_dir):
+ return RngSchema(cib_elem, local_dir)
+
+
+class Schema(object):
+ validate_name = None
+
+ def __init__(self, cib_elem, local_dir, is_local=True, get_schema_fn=None):
+ self.is_local = is_local
+ if get_schema_fn is not None:
+ self.get_schema_fn = get_schema_fn
+ else:
+ self.get_schema_fn = read_schema_local
+
+        self.local_dir = local_dir
+        # set defaults before refresh(), which fills these in
+        self.schema_str_docs = {}
+        self.schema_filename = None
+        self.refresh(cib_elem)
+
+ def update_schema(self):
+ 'defined in subclasses'
+ raise NotImplementedError
+
+ def find_elem(self, elem_name):
+ 'defined in subclasses'
+ raise NotImplementedError
+
+ def refresh(self, cib_elem):
+ saved_validate_name = self.validate_name
+ self.validate_name = get_validate_name(cib_elem)
+ self.schema_filename = get_schema_filename(self.validate_name)
+ if self.validate_name != saved_validate_name:
+ return self.update_schema()
+
+ def validate_cib(self, new_cib_elem):
+ detail_msg = ""
+
+ if self.is_local:
+ schema_f = os.path.join(self.local_dir, self.schema_filename)
+ else:
+ try:
+ tmp_f = self.tmp_schema_f()
+ except EnvironmentError as msg:
+ raise PacemakerError("Cannot expand the Relax-NG schema: " + str(msg))
+ if tmp_f is None:
+ raise PacemakerError("Cannot expand the Relax-NG schema")
+ else:
+ schema_f = tmp_f
+
+ try:
+ cib_elem = etree.fromstring(etree.tostring(new_cib_elem))
+ except etree.Error as msg:
+ raise PacemakerError("Failed to parse the CIB XML: " + str(msg))
+
+ try:
+ schema = etree.RelaxNG(file=schema_f)
+
+ except etree.Error as msg:
+ raise PacemakerError("Failed to parse the Relax-NG schema: " + str(msg))
+ try:
+ etree.clear_error_log()
+        except Exception:
+ pass
+
+ is_valid = schema.validate(cib_elem)
+ if not is_valid:
+ for error_entry in schema.error_log:
+ detail_msg += error_entry.level_name + ": " + error_entry.message + "\n"
+
+ if not self.is_local:
+ try:
+ delete_dir(os.path.dirname(tmp_f))
+            except Exception:
+ pass
+
+ return (is_valid, detail_msg)
+
+ def tmp_schema_f(self):
+ tmp_dir = tempfile.mkdtemp()
+ for schema_doc_name in self.schema_str_docs:
+ schema_doc_filename = os.path.join(tmp_dir, schema_doc_name)
+ fd = os.open(schema_doc_filename, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o644)
+
+ schema_doc_str = self.schema_str_docs[schema_doc_name]
+
+            # os.write() requires bytes on Python 3
+            os.write(fd, schema_doc_str.encode('utf-8'))
+ os.close(fd)
+
+ if self.schema_filename in self.schema_str_docs:
+ return os.path.join(tmp_dir, self.schema_filename)
+ else:
+ return None
+
+ def get_sub_elems_by_obj(self, obj, sub_set='a'):
+ '''defined in subclasses'''
+ raise NotImplementedError
+
+ def get_elem_attrs_by_obj(self, obj, sub_set='a'):
+ '''defined in subclasses'''
+ raise NotImplementedError
+
+ # sub_set: 'a'(all), 'r'(required), 'o'(optional)
+ def get_elem_attrs(self, elem_name, sub_set='a'):
+ elem_obj = self.find_elem(elem_name)
+ if elem_obj is None:
+ return None
+ return self.get_elem_attrs_by_obj(elem_obj, sub_set)
+
+ # sub_set: 'a'(all), 'r'(required), 'o'(optional)
+ def get_sub_elems(self, elem_name, sub_set='a'):
+ elem_obj = self.find_elem(elem_name)
+ if elem_obj is None:
+ return None
+ return self.get_sub_elems_by_obj(elem_obj, sub_set)
+
+ def supported_rsc_types(self):
+ return self.get_sub_elems("resources")
+
+
+def get_local_tag(el):
+ return el.tag.replace("{%s}" % el.nsmap[None], "")
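+
+# e.g. an element whose tag is '{http://relaxng.org/ns/structure/1.0}element'
+# (with that URI as its default namespace) yields the local name 'element'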
+
+
+class RngSchema(Schema):
+ expr = '//*[local-name() = $name]'
+
+ def __init__(self, cib_elem, local_dir, is_local=True, get_schema_fn=None):
+ self.rng_docs = {}
+ Schema.__init__(self, cib_elem, local_dir, is_local=is_local, get_schema_fn=get_schema_fn)
+
+ def update_schema(self):
+ self.rng_docs = {}
+ self.schema_str_docs = {}
+ self.update_rng_docs(self.validate_name, self.schema_filename)
+ return True
+
+ def update_rng_docs(self, validate_name, filename):
+ self.rng_docs[filename] = self.find_start_rng_node(validate_name, filename)
+ if self.rng_docs[filename] is None:
+ return
+ for extern_ref in self.rng_docs[filename][0].xpath(self.expr, name="externalRef"):
+ href_value = extern_ref.get("href")
+ if self.rng_docs.get(href_value) is None:
+ self.update_rng_docs(validate_name, href_value)
+
+ def find_start_rng_node(self, validate_name, filename):
+ schema_info = validate_name + " " + filename
+ crm_schema = self.get_schema_fn(validate_name,
+ os.path.join(self.local_dir, filename))
+ if not crm_schema:
+ raise PacemakerError("Cannot get the Relax-NG schema: " + schema_info)
+
+ self.schema_str_docs[filename] = crm_schema
+
+ try:
+ grammar = etree.fromstring(crm_schema.encode('utf-8'))
+ except Exception as msg:
+ raise PacemakerError("Failed to parse the Relax-NG schema: " + str(msg) + schema_info)
+
+ start_nodes = grammar.xpath(self.expr, name="start")
+ if len(start_nodes) > 0:
+ start_node = start_nodes[0]
+ return (grammar, start_node)
+ else:
+ raise PacemakerError("Cannot find the start in the Relax-NG schema: " + schema_info)
+
+ def find_in_grammar(self, grammar, node, name):
+ for elem_node in grammar.xpath(self.expr, name=node):
+ if elem_node.get("name") == name:
+ return elem_node
+ return None
+
+ def find_elem(self, elem_name):
+ elem_node = None
+ for (grammar, start_node) in list(self.rng_docs.values()):
+ elem_node = self.find_in_grammar(grammar, 'element', elem_name)
+ if elem_node is not None:
+ return (grammar, elem_node)
+ return None
+
+ def rng_xpath(self, xpath, namespaces=None):
+ return [grammar.xpath(xpath, namespaces=namespaces)
+ for grammar, _ in list(self.rng_docs.values())]
+
+ def get_sub_rng_nodes(self, grammar, rng_node):
+ sub_rng_nodes = []
+ for child_node in rng_node.iterchildren():
+ if not isinstance(child_node.tag, str):
+ continue
+ local_tag = get_local_tag(child_node)
+ if local_tag == "ref":
+ def_node = self.find_in_grammar(grammar, 'define', child_node.get('name'))
+ if def_node is not None:
+ sub_rng_nodes.extend(self.get_sub_rng_nodes(grammar, def_node))
+ elif local_tag == "externalRef":
+ nodes = self.get_sub_rng_nodes(*self.rng_docs[child_node.get("href")])
+ sub_rng_nodes.extend(nodes)
+ elif local_tag in ["element", "attribute", "value", "data", "text"]:
+ sub_rng_nodes.append([(grammar, child_node)])
+ elif local_tag in ["interleave", "optional", "zeroOrMore",
+ "choice", "group", "oneOrMore"]:
+ nodes = self.get_sub_rng_nodes(grammar, child_node)
+ for node in nodes:
+ node.append(copy.deepcopy(child_node))
+ sub_rng_nodes.extend(nodes)
+ return sub_rng_nodes
+
+ def sorted_sub_rng_nodes_by_name(self, obj_type):
+ rng_node = self.find_elem(obj_type)
+ if rng_node is None or rng_node[1] is None:
+ return None
+ return self.sorted_sub_rng_nodes_by_node(*rng_node)
+
+ def sorted_sub_rng_nodes_by_node(self, grammar, rng_node):
+ sub_rng_nodes = self.get_sub_rng_nodes(grammar, rng_node)
+ sorted_nodes = {}
+ for sub_rng_node in sub_rng_nodes:
+ name = get_local_tag(sub_rng_node[0][1])
+ if sorted_nodes.get(name) is None:
+ sorted_nodes[name] = []
+ sorted_nodes[name].append(sub_rng_node)
+ return sorted_nodes
+
+ def get_elem_attr_objs(self, obj_type):
+ return self.sorted_sub_rng_nodes_by_name(obj_type).get("attribute", [])
+
+ def get_sub_elem_objs(self, obj_type):
+ return self.sorted_sub_rng_nodes_by_name(obj_type).get("element", [])
+
+ def find_decl(self, rng_node, name, first=True):
+ decl_node_index = 0
+ for decl_node in rng_node[1:]:
+ if get_local_tag(decl_node) == name:
+ decl_node_index = rng_node.index(decl_node) - len(rng_node)
+ if first:
+ break
+ return decl_node_index
+
+ def get_sorted_decl_nodes(self, decl_nodes_list, decl_type):
+ sorted_nodes = []
+ for rng_nodes in decl_nodes_list:
+ rng_node = rng_nodes.get(decl_type)
+ if rng_node is not None and rng_node not in sorted_nodes:
+ sorted_nodes.append(rng_node)
+ return sorted_nodes
+
+ def get_obj_name(self, rng_node):
+ return rng_node[0][1].get("name")
+
+    def get_attr_type(self, attr_rng_node):
+        sub_rng_nodes = self.sorted_sub_rng_nodes_by_node(*attr_rng_node[0])
+        for sub_rng_node in sub_rng_nodes.get("data", []):
+            # return the type of the first <data> declaration
+            return sub_rng_node[0][1].get("type")
+        return None
+
+ def get_attr_values(self, attr_rng_node):
+ attr_values = []
+ sub_rng_nodes = self.sorted_sub_rng_nodes_by_node(*attr_rng_node[0])
+ for sub_rng_node in sub_rng_nodes.get("value", []):
+ attr_values.append(sub_rng_node[0][1].text)
+
+ return attr_values
+
+ def get_attr_default(self, attr_rng_node):
+ return attr_rng_node[0][1].get("ann:defaultValue")
+
+ def _get_by_obj(self, rng_obj, typ, sub_set):
+ """
+ Used to select attributes or elements based on
+ sub_set selector and optionality.
+ typ: 'attribute' or 'element'
+ sub_set: 'a'(all), 'r'(required), 'o'(optional)
+ """
+ grammar, rng_node = rng_obj
+ if rng_node is None:
+ return None
+
+ selected = []
+ sub_rng_nodes = self.get_sub_rng_nodes(grammar, rng_node)
+ for node in sub_rng_nodes:
+ head = node[0][1]
+ if get_local_tag(head) != typ:
+ continue
+ name = head.get("name")
+ if selected.count(name):
+ continue
+ # the complicated case: 'choice'
+ optional = any(self.find_decl(node, opt) != 0
+ for opt in ("optional", "zeroOrMore"))
+ if subset_select(sub_set, optional):
+ selected.append(name)
+ return selected
+
+ def get_elem_attrs_by_obj(self, rng_obj, sub_set='a'):
+ "sub_set: 'a'(all), 'r'(required), 'o'(optional)"
+ return self._get_by_obj(rng_obj, 'attribute', sub_set=sub_set)
+
+ def get_sub_elems_by_obj(self, rng_obj, sub_set='a'):
+ "sub_set: 'a'(all), 'r'(required), 'o'(optional)"
+ return self._get_by_obj(rng_obj, 'element', sub_set=sub_set)
diff --git a/crmsh/parallax.py b/crmsh/parallax.py
new file mode 100644
index 0000000..c145c4f
--- /dev/null
+++ b/crmsh/parallax.py
@@ -0,0 +1,76 @@
+# Copyright (C) 2019 Xin Liang <XLiang@suse.com>
+# See COPYING for license information.
+import typing
+
+from crmsh.sh import Utils
+from crmsh.prun import prun
+
+
+Error = prun.PRunError
+
+
+def parallax_call(nodes, cmd, *, timeout_seconds: int = -1):
+ """
+ Executes the given command on a set of hosts, collecting the output, and raises an exception when an error occurs
+ nodes: a set of hosts
+ cmd: command
+ timeout_seconds: the timeout in seconds.
+ Returns [(host, (rc, stdout, stderr)), ...], or raises ValueError when any host fails or exits with a nonzero rc
+ """
+ results = prun.prun({node: cmd for node in nodes}, timeout_seconds=timeout_seconds)
+ for node, result in results.items():
+ if isinstance(result, prun.PRunError):
+ raise ValueError('Failed to run command {} on {}@{}: {}'.format(cmd, result.user, result.host, result))
+ elif result.returncode != 0:
+ raise ValueError("Failed on {}: {}".format(
+ node,
+ Utils.decode_str(result.stderr) if result.stderr is not None else None,
+ ))
+ return [(node, (result.returncode, result.stdout, result.stderr)) for node, result in results.items()]
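+
+# Illustrative usage (editor's sketch, not part of the original module; assumes
+# passwordless SSH to the listed nodes):
+#
+#     results = parallax_call(["node1", "node2"], "corosync-cfgtool -s", timeout_seconds=30)
+#     for host, (rc, stdout, stderr) in results:
+#         print(host, rc, stdout.decode())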
+
+
+def parallax_slurp(nodes: typing.Sequence[str], localdir, filename) -> typing.List[typing.Tuple[str, str]]:
+ """
+ Copies a file from each remote node to the local node
+ nodes: a set of hosts
+ localdir: local directory to store the fetched files in
+ filename: remote filename to fetch
+ Returns [(host, localpath), ...], or raises ValueError when any of the hosts fails.
+ """
+ results = prun.pfetch_from_remote(nodes, filename, localdir)
+ for node, result in results.items():
+ if isinstance(result, prun.PRunError):
+ raise ValueError("Failed on {}@{}: {}".format(result.user, node, result))
+ return [(k, v) for k, v in results.items()]
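+
+# Illustrative usage (editor's sketch): fetch corosync.conf from each node into
+# per-host locations under /tmp/collect:
+#
+#     for host, path in parallax_slurp(["node1", "node2"], "/tmp/collect", "/etc/corosync/corosync.conf"):
+#         print(host, "->", path)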
+
+
+def parallax_copy(nodes, src, dst, recursive=False, *, timeout_seconds: int = -1):
+ """
+ Copies from the local node to a set of remote hosts
+ nodes: a set of hosts
+ src: local path
+ dst: remote path
+ recursive: whether to copy directories recursively
+ timeout_seconds: the timeout in seconds.
+ Returns None, or raises ValueError when any of the hosts fails.
+ """
+ results = prun.pcopy_to_remote(src, nodes, dst, recursive, timeout_seconds=timeout_seconds)
+ for node, exc in results.items():
+ if exc is not None:
+ raise ValueError("Failed on {}@{}: {}".format(exc.user, node, exc))
+
+
+def parallax_run(nodes, cmd):
+ """
+ Executes the given command on a set of hosts, collecting the output and any errors
+ nodes: a set of hosts
+ cmd: command
+
+ Returns {host: (rc, stdout, stderr), ...}, or raises ValueError when any one of the hosts fails to start running
+ the command.
+ """
+ results = prun.prun({node: cmd for node in nodes})
+ for value in results.values():
+ if isinstance(value, prun.PRunError):
+ raise ValueError('Failed to run command {} on {}@{}: {}'.format(cmd, value.user, value.host, value))
+ return {node: (result.returncode, result.stdout, result.stderr) for node, result in results.items()}
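+
+# Illustrative usage (editor's sketch): unlike parallax_call(), a nonzero exit
+# status is returned in the result dict rather than raised:
+#
+#     for host, (rc, out, err) in parallax_run(["node1"], "systemctl is-active sbd").items():
+#         print(host, "active" if rc == 0 else "inactive")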
diff --git a/crmsh/parse.py b/crmsh/parse.py
new file mode 100644
index 0000000..89f0962
--- /dev/null
+++ b/crmsh/parse.py
@@ -0,0 +1,1841 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013-2016 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import shlex
+import re
+import inspect
+from lxml import etree
+from . import ra
+from . import constants
+from .ra import disambiguate_ra_type, ra_type_validate
+from . import schema
+from .utils import keyword_cmp, verify_boolean, lines2cli
+from .utils import get_boolean, olist, canonical_boolean
+from .utils import handle_role_for_ocf_1_1, compatible_role
+from . import xmlutil
+from . import log
+
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+
+
+_NVPAIR_RE = re.compile(r'([^=@$][^=]*)=(.*)$')
+_NVPAIR_ID_RE = re.compile(r'\$([^:=]+)(?::(.+))?=(.*)$')
+_NVPAIR_REF_RE = re.compile(r'@([^:]+)(?::(.+))?$')
+_NVPAIR_KEY_RE = re.compile(r'([^:=]+)$', re.IGNORECASE)
+_IDENT_RE = re.compile(r'([a-z0-9_#$-][^=]*)$', re.IGNORECASE)
+_DISPATCH_RE = re.compile(r'[a-z0-9_]+$', re.IGNORECASE)
+_DESC_RE = re.compile(r'description=(.+)$', re.IGNORECASE)
+_ATTR_RE = re.compile(r'\$?([^=]+)=(.*)$')
+_ALERT_PATH_RE = re.compile(r'(.*)$')
+_RESOURCE_RE = re.compile(r'([a-z_#$][^=]*)$', re.IGNORECASE)
+_IDSPEC_RE = re.compile(r'(\$id-ref|\$id)=(.*)$', re.IGNORECASE)
+_ID_RE = re.compile(r'\$id=(.*)$', re.IGNORECASE)
+_ID_NEW_RE = re.compile(r'([\w-]+):$', re.IGNORECASE)
+_SCORE_RE = re.compile(r"([^:]+):$")
+_ROLE_RE = re.compile(r"\$?role=(.+)$", re.IGNORECASE)
+_BOOLOP_RE = re.compile(r'(%s)$' % ('|'.join(constants.boolean_ops)), re.IGNORECASE)
+_UNARYOP_RE = re.compile(r'(%s)$' % ('|'.join(constants.unary_ops)), re.IGNORECASE)
+_ACL_RIGHT_RE = re.compile(r'(%s)$' % ('|'.join(constants.acl_rule_names)), re.IGNORECASE)
+_ROLE_REF_RE = re.compile(r'role:(.+)$', re.IGNORECASE)
+_PERM_RE = re.compile(r"([^:]+)(?::(.+))?$", re.I)
+_UNAME_RE = re.compile(r'([^:]+)(:(normal|member|ping|remote))?$', re.IGNORECASE)
+_TEMPLATE_RE = re.compile(r'@(.+)$')
+_RA_TYPE_RE = re.compile(r'[a-z0-9_:-]+$', re.IGNORECASE)
+_TAG_RE = re.compile(r"([a-zA-Z_][^\s:]*):?$")
+_ROLE2_RE = re.compile(r"role=(.+)$", re.IGNORECASE)
+_TARGET_RE = re.compile(r'([^:]+):$')
+_TARGET_ATTR_RE = re.compile(r'attr:([\w-]+)=([\w-]+)$', re.IGNORECASE)
+_TARGET_PATTERN_RE = re.compile(r'pattern:(.+)$', re.IGNORECASE)
+TERMINATORS = ('params', 'meta', 'utilization', 'operations', 'op', 'op_params', 'op_meta', 'rule', 'attributes')
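+
+# A few token shapes these patterns classify (editor's illustration, derived
+# from the expressions above):
+#   _NVPAIR_RE      "migration-threshold=3"
+#   _NVPAIR_ID_RE   "$myid:name=value"
+#   _NVPAIR_REF_RE  "@other-id:name"
+#   _SCORE_RE       "100:" / "INFINITY:"  (a trailing colon marks a score)
+#   _TEMPLATE_RE    "@my-template"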
+
+
+class ParseError(Exception):
+ '''
+ Raised by parsers when parsing fails.
+ Carries no error message; parsers should report
+ errors before raising the exception.
+ '''
+
+
+class Validation(object):
+ def resource_roles(self):
+ 'returns list of valid resource roles'
+ return schema.rng_attr_values('resource_set', 'role')
+
+ def resource_actions(self):
+ 'returns list of valid resource actions'
+ return schema.rng_attr_values('resource_set', 'action')
+
+ def date_ops(self):
+ 'returns list of valid date operations'
+ return schema.rng_attr_values_l('date_expression', 'operation')
+
+ def expression_types(self):
+ 'returns list of valid expression types'
+ return schema.rng_attr_values_l('expression', 'type')
+
+ def rsc_order_kinds(self):
+ return schema.rng_attr_values('rsc_order', 'kind')
+
+ def class_provider_type(self, value):
+ """
+ Unravel [class:[provider:]]type
+ returns: (class, provider, type)
+ """
+ c_p_t = disambiguate_ra_type(value)
+ if not ra_type_validate(value, *c_p_t):
+ return None
+ return c_p_t
+
+ def canonize(self, value, lst):
+ 'case-normalizes value to what is in lst'
+ value = value.lower()
+ for x in lst:
+ if value == x.lower():
+ return x
+ return None
+
+ def classify_role(self, role):
+ if not role:
+ return role, None
+ elif role in olist(self.resource_roles()):
+ return self.canonize(role, self.resource_roles()), 'role'
+ elif role.isdigit():
+ return role, 'instance'
+ return role, None
+
+ def classify_action(self, action):
+ if not action:
+ return action, None
+ elif action in olist(self.resource_actions()):
+ return self.canonize(action, self.resource_actions()), 'action'
+ elif action.isdigit():
+ return action, 'instance'
+ return action, None
+
+ def op_attributes(self):
+ return olist(schema.get('attr', 'op', 'a'))
+
+ def acl_2_0(self):
+ vname = schema.validate_name()
+ sp = vname.split('-')
+ try:
+ return sp[0] == 'pacemaker' and sp[1] == 'next' or float(sp[1]) >= 2.0
+ except Exception:
+ return False
+
+ def node_type_optional(self):
+ ns = {'t': 'http://relaxng.org/ns/structure/1.0'}
+ path = '//t:element[@name="nodes"]'
+ path = path + '//t:element[@name="node"]/t:optional/t:attribute[@name="type"]'
+ has_optional = schema.rng_xpath(path, namespaces=ns)
+ return len(has_optional) > 0
+
+
+validator = Validation()
+
+
+class BaseParser(object):
+ _BINOP_RE = None
+ _VALUE_SOURCE_RE = None
+
+ def parse(self, cmd):
+ "Called by do_parse(). Raises ParseError if parsing fails."
+ raise NotImplementedError
+
+ def err(self, msg, context=None, token=None):
+ "Report a parse error and abort."
+ if token is None and self.has_tokens():
+ token = self._cmd[self._currtok]
+ if context is None:
+ context = self._cmd[0]
+ logger_utils.syntax_err(self._cmd, context=context, token=token, msg=msg)
+ raise ParseError
+
+ def begin(self, cmd, min_args=-1):
+ self._cmd = cmd
+ self._currtok = 0
+ self._lastmatch = None
+ if min_args > -1 and len(cmd) < min_args + 1:
+ self.err("Expected at least %d arguments" % (min_args))
+
+ def begin_dispatch(self, cmd, min_args=-1):
+ """
+ Begin parsing cmd.
+ Dispatches to parse_<resource> based on the first token.
+ """
+ self.begin(cmd, min_args=min_args)
+ return self.match_dispatch(errmsg="Unknown command")
+
+ def do_parse(self, cmd, ignore_empty, complete_advised):
+ """
+ Called by CliParser. Calls parse()
+ Parsers should pass their return value through this method.
+ """
+ self.ignore_empty = ignore_empty
+ self.complete_advised = complete_advised
+ out = self.parse(cmd)
+ if self.has_tokens():
+ self.err("Unknown arguments: " + ' '.join(self._cmd[self._currtok:]))
+ return out
+
+ def try_match(self, rx):
+ """
+ Try to match the given regex with the curren token.
+ rx: compiled regex or string
+ returns: the match object, if the match is successful
+ """
+ tok = self.current_token()
+ if not tok:
+ return None
+ if isinstance(rx, str):
+ if not rx.endswith('$'):
+ rx = rx + '$'
+ self._lastmatch = re.match(rx, tok, re.IGNORECASE)
+ else:
+ self._lastmatch = rx.match(tok)
+ if self._lastmatch is not None:
+ if not self.has_tokens():
+ self.err("Unexpected end of line")
+ self._currtok += 1
+ return self._lastmatch
+
+ def match(self, rx, errmsg=None):
+ """
+ Match the given regex with the current token.
+ If match fails, parse is aborted and an error reported.
+ rx: compiled regex or string.
+ errmsg: optional error message if match fails.
+ Returns: The matched token.
+ """
+ if not self.try_match(rx):
+ if errmsg:
+ self.err(errmsg)
+ elif isinstance(rx, str):
+ self.err("Expected " + rx)
+ else:
+ self.err("Expected " + rx.pattern.rstrip('$'))
+ return self.matched(0)
+
+ def matched(self, idx=0):
+ """
+ After a successful match, returns
+ the groups generated by the match.
+ """
+ if hasattr(self._lastmatch, "group"):
+ return self._lastmatch.group(idx)
+ return None
+
+ def lastmatch(self):
+ return self._lastmatch
+
+ def rewind(self):
+ "useful for when validation fails, to undo the match"
+ if self._currtok > 0:
+ self._currtok -= 1
+
+ def current_token(self):
+ if self.has_tokens():
+ return self._cmd[self._currtok]
+ return None
+
+ def has_tokens(self):
+ return self._currtok < len(self._cmd)
+
+ def match_rest(self):
+ '''
+ matches and returns the rest
+ of the tokens in a list
+ '''
+ ret = self._cmd[self._currtok:]
+ self._currtok = len(self._cmd)
+ return ret
+
+ def match_any(self):
+ if not self.has_tokens():
+ self.err("Unexpected end of line")
+ tok = self.current_token()
+ self._currtok += 1
+ self._lastmatch = tok
+ return tok
+
+ def match_nvpairs_bykey(self, valid_keys, minpairs=1):
+ """
+ matches string of p=v | p tokens, but only if p is in valid_keys
+ Returns list of <nvpair> tags
+ """
+ _KEY_RE = re.compile(r'(%s)=(.+)$' % '|'.join(valid_keys))
+ _NOVAL_RE = re.compile(r'(%s)$' % '|'.join(valid_keys))
+ ret = []
+ while True:
+ if self.try_match(_KEY_RE):
+ ret.append(xmlutil.nvpair(self.matched(1), self.matched(2)))
+ elif self.try_match(_NOVAL_RE):
+ ret.append(xmlutil.nvpair(self.matched(1), ""))
+ else:
+ break
+ if len(ret) < minpairs:
+ if minpairs == 1:
+ self.err("Expected at least one name-value pair")
+ else:
+ self.err("Expected at least %d name-value pairs" % (minpairs))
+ return ret
+
+ def match_nvpairs(self, terminator=None, minpairs=1, allow_empty=True):
+ """
+ Matches string of p=v tokens
+ Returns list of <nvpair> tags
+ p tokens are also accepted and an nvpair tag with no value attribute
+ is created, as long as they are not in the terminator list
+ """
+ ret = []
+ if terminator is None:
+ terminator = TERMINATORS
+ while True:
+ tok = self.current_token()
+ if tok is not None and tok.lower() in terminator:
+ break
+ elif self.try_match(_NVPAIR_REF_RE):
+ ret.append(xmlutil.nvpair_ref(self.matched(1),
+ self.matched(2)))
+ elif self.try_match(_NVPAIR_ID_RE):
+ ret.append(xmlutil.nvpair_id(self.matched(1),
+ self.matched(2),
+ self.matched(3)))
+ elif self.try_match(_NVPAIR_RE):
+ if not allow_empty and not self.matched(2):
+ self.err("Empty value for {} is not allowed".format(self.matched(1)))
+ ret.append(xmlutil.nvpair(self.matched(1),
+ self.matched(2)))
+ elif len(terminator) and self.try_match(_NVPAIR_KEY_RE):
+ ret.append(xmlutil.new("nvpair", name=self.matched(1)))
+ else:
+ break
+ if len(ret) < minpairs:
+ if minpairs == 1:
+ self.err("Expected at least one name-value pair")
+ else:
+ self.err("Expected at least %d name-value pairs" % (minpairs))
+ return ret
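+
+# Illustrative behaviour (editor's note): given the remaining tokens
+# ["target-role=Stopped", "@ref-id", "meta"], this yields an nvpair for
+# target-role, an nvpair reference for @ref-id, and then stops at "meta",
+# which is one of the TERMINATORS.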
+
+ def try_match_nvpairs(self, name, terminator=None):
+ """
+ Matches sequence of <name> [<key>=<value> [<key>=<value> ...] ...]
+ """
+ if self.try_match(name):
+ self._lastmatch = self.match_nvpairs(terminator=terminator, minpairs=1)
+ else:
+ self._lastmatch = []
+ return self._lastmatch
+
+ def match_identifier(self):
+ return self.match(_IDENT_RE, errmsg="Expected identifier")
+
+ def match_resource(self):
+ return self.match(_RESOURCE_RE, errmsg="Expected resource")
+
+ def match_idspec(self):
+ """
+ matches $id=<id> | $id-ref=<id>
+ matched(1) = $id|$id-ref
+ matched(2) = <id>
+ """
+ return self.match(_IDSPEC_RE, errmsg="Expected $id-ref=<id> or $id=<id>")
+
+ def try_match_idspec(self):
+ """
+ matches $id=<value> | $id-ref=<value>
+ matched(1) = $id|$id-ref
+ matched(2) = <value>
+ """
+ return self.try_match(_IDSPEC_RE)
+
+ def try_match_initial_id(self):
+ """
+ Used as the first match on certain commands
+ like node and property, to match either
+ node $id=<id>
+ or
+ node <id>:
+ """
+ m = self.try_match(_ID_RE)
+ if m:
+ return m
+ return self.try_match(_ID_NEW_RE)
+
+ def match_split(self):
+ """
+ matches value[:value]
+ """
+ if not self.current_token():
+ self.err("Expected value[:value]")
+ sp = self.current_token().split(':')
+ if len(sp) > 2:
+ self.err("Expected value[:value]")
+ while len(sp) < 2:
+ sp.append(None)
+ self.match_any()
+ return sp
+
+ def match_dispatch(self, errmsg=None):
+ """
+ Match on the next token. Looks
+ for a method named parse_<token>.
+ If found, the named function is called.
+ Else, an error is reported.
+ """
+ t = self.match(_DISPATCH_RE, errmsg=errmsg)
+ t = 'parse_' + t.lower()
+ if hasattr(self, t) and callable(getattr(self, t)):
+ return getattr(self, t)()
+ self.rewind() # rewind for more accurate error message
+ self.err(errmsg)
+
+ def try_match_description(self):
+ """
+ reads a description=? token if one is next
+ """
+ if self.try_match(_DESC_RE):
+ return self.matched(1)
+ return None
+
+ def match_until(self, end_token):
+ tokens = []
+ while self.current_token() is not None and self.current_token() != end_token:
+ tokens.append(self.match_any())
+ return tokens
+
+ def match_attr_list(self, name, tag, allow_empty=True, terminator=None):
+ """
+ matches [$id=<id>] [<score>:] <n>=<v> <n>=<v> ... | $id-ref=<id-ref>
+ name: the CLI name of the attribute list (used when resolving $id-ref)
+ tag: the XML tag of the generated element
+ """
+ from .cibconfig import cib_factory
+
+ xmlid = None
+ if self.try_match_idspec():
+ if self.matched(1) == '$id-ref':
+ r = xmlutil.new(tag)
+ ref = cib_factory.resolve_id_ref(name, self.matched(2))
+ r.set('id-ref', ref)
+ return r
+ else:
+ xmlid = self.matched(2)
+ score = None
+ if self.try_match(_SCORE_RE):
+ score = self.matched(1)
+ rules = self.match_rules()
+ values = self.match_nvpairs(minpairs=0, terminator=terminator)
+ if (allow_empty, xmlid, score, len(rules), len(values)) == (False, None, None, 0, 0):
+ return None
+ return xmlutil.attributes(tag, rules, values, xmlid=xmlid, score=score)
+
+ def match_attr_lists(self, name_map, implicit_initial=None, terminator=None):
+ """
+ generator which matches attr_lists
+ name_map: maps CLI name to XML name
+ """
+ to_match = '|'.join(list(name_map.keys()))
+ if self.try_match(to_match):
+ name = self.matched(0).lower()
+ yield self.match_attr_list(name, name_map[name], terminator=terminator)
+ elif implicit_initial is not None:
+ attrs = self.match_attr_list(implicit_initial,
+ name_map[implicit_initial],
+ allow_empty=False,
+ terminator=terminator)
+ if attrs is not None:
+ yield attrs
+ while self.try_match(to_match):
+ name = self.matched(0).lower()
+ yield self.match_attr_list(name, name_map[name], terminator=terminator)
+
+ def match_rules(self):
+ '''parse rule definitions'''
+ from .cibconfig import cib_factory
+
+ rules = []
+ while self.try_match('rule'):
+ rule = xmlutil.new('rule')
+ rules.append(rule)
+ idref = False
+ if self.try_match_idspec():
+ idtyp, idval = self.matched(1)[1:], self.matched(2)
+ if idtyp == 'id-ref':
+ idval = cib_factory.resolve_id_ref('rule', idval)
+ idref = True
+ rule.set(idtyp, idval)
+ if self.try_match(_ROLE_RE):
+ rule.set('role', handle_role_for_ocf_1_1(self.matched(1)))
+ if idref:
+ continue
+ if self.try_match(_SCORE_RE):
+ rule.set(*self.validate_score(self.matched(1)))
+ else:
+ rule.set('score', 'INFINITY')
+ boolop, exprs = self.match_rule_expression()
+ if boolop and not keyword_cmp(boolop, 'and'):
+ rule.set('boolean-op', boolop)
+ for expr in exprs:
+ rule.append(expr)
+ return rules
+
+ def match_rule_expression(self):
+ """
+ expression :: <simple_exp> [bool_op <simple_exp> ...]
+ bool_op :: or | and
+ simple_exp :: <attribute> [type:]<binary_op> <value>
+ | <unary_op> <attribute>
+ | date <date_expr>
+ type :: string | version | number
+ binary_op :: lt | gt | lte | gte | eq | ne
+ unary_op :: defined | not_defined
+
+ date_expr :: lt <end>
+ | gt <start>
+ | in_range start=<start> end=<end>
+ | in_range start=<start> <duration>
+ | date_spec <date_spec>
+ duration|date_spec ::
+ hours=<value>
+ | monthdays=<value>
+ | weekdays=<value>
+ | yeardays=<value>
+ | months=<value>
+ | weeks=<value>
+ | years=<value>
+ | weekyears=<value>
+ | moon=<value>
+ """
+ boolop = None
+ exprs = [self._match_simple_exp()]
+ while self.try_match(_BOOLOP_RE):
+ if boolop and self.matched(1) != boolop:
+ self.err("Mixing bool ops not allowed: %s != %s" % (boolop, self.matched(1)))
+ else:
+ boolop = self.matched(1)
+ exprs.append(self._match_simple_exp())
+ return boolop, exprs
+
+ def _match_simple_exp(self):
+ if self.try_match('date'):
+ return self.match_date()
+ elif self.try_match(_UNARYOP_RE):
+ unary_op = self.matched(1)
+ attr = self.match_identifier()
+ return xmlutil.new('expression', operation=unary_op, attribute=attr)
+ else:
+ attr = self.match_identifier()
+ if not self._BINOP_RE:
+ self._BINOP_RE = re.compile(r'((%s):)?(%s)$' % (
+ '|'.join(validator.expression_types()),
+ '|'.join(constants.binary_ops)), re.IGNORECASE)
+ self.match(self._BINOP_RE)
+ optype = self.matched(2)
+ binop = self.matched(3)
+ node = xmlutil.new('expression', operation=binop, attribute=attr)
+ xmlutil.maybe_set(node, 'type', optype)
+ val = self.match_any()
+ if not self._VALUE_SOURCE_RE:
+ self._VALUE_SOURCE_RE = re.compile(r"^(?P<val_src>[^\s{}]+)({(?P<val>\S+)})?$")
+ val_src_match = re.match(self._VALUE_SOURCE_RE, val)
+ if val_src_match.group('val') is None:
+ node.set('value', val)
+ else:
+ node.set('value', val_src_match.group('val'))
+ node.set('value-source', val_src_match.group('val_src'))
+ return node
+
+ def match_date(self):
+ """
+ returns for example:
+ <date_expression id="" operation="op">
+ <date_spec hours="9-16"/>
+ </date_expression>
+ """
+ node = xmlutil.new('date_expression')
+
+ date_ops = validator.date_ops()
+ # spec -> date_spec
+ if 'date_spec' in date_ops:
+ date_ops.append('spec')
+ # in -> in_range
+ if 'in_range' in date_ops:
+ date_ops.append('in')
+ self.match('(%s)$' % ('|'.join(date_ops)))
+ op = self.matched(1)
+ opmap = {'in': 'in_range', 'spec': 'date_spec'}
+ node.set('operation', opmap.get(op, op))
+ if op in olist(constants.simple_date_ops):
+ # lt|gt <value>
+ val = self.match_any()
+ if keyword_cmp(op, 'lt'):
+ node.set('end', val)
+ else:
+ node.set('start', val)
+ return node
+ elif op in ('in_range', 'in'):
+ # date in start=<start> end=<end>
+ # date in start=<start> <duration>
+ valid_keys = list(constants.in_range_attrs) + constants.date_spec_names
+ vals = self.match_nvpairs_bykey(valid_keys, minpairs=2)
+ return xmlutil.set_date_expression(node, 'duration', vals)
+ elif op in ('date_spec', 'spec'):
+ valid_keys = constants.date_spec_names
+ vals = self.match_nvpairs_bykey(valid_keys, minpairs=1)
+ return xmlutil.set_date_expression(node, 'date_spec', vals)
+ else:
+ self.err("Unknown date operation '%s', please upgrade crmsh" % (op))
+
+ def validate_score(self, score, noattr=False, to_kind=False):
+ if not noattr and score in olist(constants.score_types):
+ return ["score", constants.score_types[score.lower()]]
+ elif re.match("^[+-]?(inf(inity)?|INF(INITY)?|[0-9]+)$", score):
+ score = re.sub("inf(inity)?|INF(INITY)?", "INFINITY", score)
+ if to_kind:
+ return ["kind", score_to_kind(score)]
+ else:
+ return ["score", score]
+ if noattr:
+ # orders have the special kind attribute
+ kind = validator.canonize(score, validator.rsc_order_kinds())
+ if not kind:
+ self.err("Invalid kind: " + score)
+ return ['kind', kind]
+ else:
+ return ['score-attribute', score]
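+
+# Normalizations performed above (editor's examples):
+#   validate_score("inf")  -> ["score", "INFINITY"]
+#   validate_score("100")  -> ["score", "100"]
+#   validate_score("0", noattr=True, to_kind=True) -> ["kind", "Optional"]
+#   validate_score("blue") -> ["score-attribute", "blue"]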
+
+ def match_arguments(self, out, name_map, implicit_initial=None, terminator=None):
+ """
+ [<name> attr_list]
+ [operations id_spec]
+ [op op_type [<attribute>=<value> ...] ...]
+
+ attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+ id_spec :: $id=<id> | $id-ref=<id>
+ op_type :: start | stop | monitor
+
+ implicit_initial: when matching attr lists, if none match at first
+ parse an implicit initial token and then continue.
+ This is so for example: primitive foo Dummy state=1 is accepted when
+ params is the implicit initial.
+ """
+ names = olist(list(name_map.keys()))
+ oplist = olist([op for op in name_map if op.lower() in ('operations', 'op')])
+ for op in oplist:
+ del name_map[op]
+ bundle_list = olist([op for op in name_map if op.lower()
+ in ('docker', 'rkt', 'network', 'port-mapping', 'storage', 'primitive')])
+ for bl in bundle_list:
+ del name_map[bl]
+ initial = True
+ while self.has_tokens():
+ t = self.current_token().lower()
+ if t in names:
+ initial = False
+ if t in oplist:
+ self.match_operations(out, t == 'operations')
+ elif t in bundle_list:
+ self.match_container(out, t)
+ else:
+ if bundle_list:
+ terminator = ['network', 'storage', 'primitive']
+ for attr_list in self.match_attr_lists(name_map, terminator=terminator):
+ out.append(attr_list)
+ elif initial:
+ initial = False
+ for attr_list in self.match_attr_lists(name_map,
+ implicit_initial=implicit_initial,
+ terminator=terminator):
+ out.append(attr_list)
+ else:
+ break
+
+ self.complete_advised_ops(out)
+
+ def complete_advised_ops(self, out):
+ """
+ Complete operation actions advised values
+ """
+ if not self.complete_advised or out.tag != "primitive":
+ return
+ ra_inst = ra.RAInfo(out.get('class'), out.get('type'), out.get('provider'))
+ ra_actions_dict = ra_inst.actions()
+ if not ra_actions_dict:
+ return
+
+ def extract_advised_value(advised_dict, action, attr, role=None):
+ adv_attr_value = None
+ try:
+ if action == "monitor":
+ if role:
+ for monitor_item in advised_dict[action]:
+ if compatible_role(role, monitor_item['role']):
+ adv_attr_value = monitor_item[attr]
+ else:
+ adv_attr_value = advised_dict[action][0][attr]
+ else:
+ adv_attr_value = advised_dict[action][attr]
+ except KeyError:
+ pass
+ return adv_attr_value
+
+ action_advised_attr_dict = {k:v for k, v in ra_actions_dict.items() if k in constants.ADVISED_ACTION_LIST}
+ operations_node = out.find("operations")
+ configured_action_list = []
+ # no operations configured
+ if operations_node is None:
+ operations_node = xmlutil.child(out, 'operations')
+ # has operations configured
+ else:
+ op_nodes_list = operations_node.findall("op")
+ for op_node in op_nodes_list:
+ action = op_node.get('name')
+ # complete advised value if interval or timeout not configured
+ adv_interval = extract_advised_value(action_advised_attr_dict, action, 'interval', op_node.get('role')) or \
+ constants.DEFAULT_INTERVAL_IN_ACTION
+ adv_timeout = extract_advised_value(action_advised_attr_dict, action, 'timeout', op_node.get('role'))
+ if op_node.get('interval') is None:
+ op_node.set('interval', adv_interval)
+ if op_node.get('timeout') is None and adv_timeout:
+ op_node.set('timeout', adv_timeout)
+ configured_action_list.append(action)
+
+ for action in action_advised_attr_dict:
+ if action in configured_action_list:
+ continue
+ # complete advised value if the operation not configured
+ value = action_advised_attr_dict[action]
+ # for multi actions, like multi monitor
+ if isinstance(value, list):
+ for v_dict in value:
+ op_node = xmlutil.new('op', name=action)
+ for k, v in v_dict.items():
+ # set normal attributes
+ if k in constants.ADVISED_KEY_LIST:
+ op_node.set(k, handle_role_for_ocf_1_1(v))
+ operations_node.append(op_node)
+ else:
+ op_node = xmlutil.new('op', name=action, **value)
+ operations_node.append(op_node)
+
+ out.append(operations_node)
+
+ def match_container(self, out, _type):
+ container_node = None
+ self.match(_type)
+ all_attrs = self.match_nvpairs(minpairs=0, terminator=['network', 'storage', 'meta', 'primitive'])
+
+ if _type != "primitive":
+ exist_node = out.find(_type)
+ if exist_node is None:
+ container_node = xmlutil.new(_type)
+ else:
+ container_node = exist_node
+
+ child_flag = False
+ for nvp in all_attrs:
+ if nvp.get('name') in ['port-mapping', 'storage-mapping']:
+ inst_attrs = xmlutil.child(container_node, nvp.get('name'))
+ child_flag = True
+ continue
+ if child_flag:
+ inst_attrs.set(nvp.get('name'), nvp.get('value'))
+ else:
+ container_node.set(nvp.get('name'), nvp.get('value'))
+ out.append(container_node)
+
+ else:
+ if len(all_attrs) != 1 or all_attrs[0].get('value'):
+ self.err("Expected primitive reference, got {}".format(", ".join("{}={}".format(nvp.get('name'), nvp.get('value') or "") for nvp in all_attrs)))
+ xmlutil.child(out, 'crmsh-ref', id=all_attrs[0].get('name'))
+
+ def match_op(self, out, pfx='op'):
+ """
+ op <optype> [<n>=<v> ...]
+
+ to:
+ <op name="monitor" timeout="30" interval="10" id="p_mysql-monitor-10">
+ <instance_attributes id="p_mysql-monitor-10-instance_attributes">
+ <nvpair name="depth" value="0" id="p_mysql-monitor-10-instance_attributes-depth"/>
+ </instance_attributes>
+ </op>
+ """
+ self.match('op')
+ op_type = self.match_identifier()
+ all_attrs = self.match_nvpairs(minpairs=0)
+ node = xmlutil.new('op', name=op_type)
+ if not any(nvp.get('name') == 'interval' for nvp in all_attrs) and op_type != "monitor":
+ all_attrs.append(xmlutil.nvpair('interval', '0s'))
+ valid_attrs = validator.op_attributes()
+ inst_attrs = None
+ for nvp in all_attrs:
+ if nvp.get('name') in valid_attrs:
+ if inst_attrs is not None:
+ self.err("Attribute order error: {} must appear before any instance attribute".format(nvp.get('name')))
+ node.set(nvp.get('name'), nvp.get('value'))
+ else:
+ if inst_attrs is None:
+ inst_attrs = xmlutil.child(node, 'instance_attributes')
+ inst_attrs.append(nvp)
+ if inst_attrs is not None:
+ node.append(inst_attrs)
+ for attr_list in self.match_attr_lists({'op_params': 'instance_attributes',
+ 'op_meta': 'meta_attributes'},
+ implicit_initial='op_params'):
+ node.append(attr_list)
+ out.append(node)
+
+ def match_operations(self, out, match_id):
+ from .cibconfig import cib_factory
+
+ def is_op():
+ return self.has_tokens() and self.current_token().lower() == 'op'
+ if match_id:
+ self.match('operations')
+ node = xmlutil.child(out, 'operations')
+ if match_id:
+ self.match_idspec()
+ match_id = self.matched(1)[1:].lower()
+ idval = self.matched(2)
+ if match_id == 'id-ref':
+ idval = cib_factory.resolve_id_ref('operations', idval)
+
+ node.set(match_id, idval)
+
+ # The ID assignment skips the operations node if possible,
+ # so we need to pass the prefix (id of the owner node)
+ # to match_op
+ pfx = out.get('id') or 'op'
+
+ while is_op():
+ self.match_op(node, pfx=pfx)
+
+
+_parsers = {}
+
+
+def parser_for(*lst):
+ def decorator(thing):
+ if inspect.isfunction(thing):
+ def parse(self, cmd):
+ return thing(self, cmd)
+ ret = type("Parser-" + '-'.join(lst), (BaseParser,), {'parse': parse})
+ else:
+ ret = thing
+ ret.can_parse = lst
+ for x in lst:
+ _parsers[x] = ret()
+ return ret
+ return decorator
+
+
+@parser_for('node')
+def parse_node(self, cmd):
+ """
+ node [<id>:|$id=<id>] <uname>[:<type>]
+ [description=<description>]
+ [attributes <param>=<value> [<param>=<value>...]]
+ [utilization <param>=<value> [<param>=<value>...]]
+
+ type :: normal | member | ping | remote
+ """
+ self.begin(cmd, min_args=1)
+ self.match('node')
+ out = xmlutil.new('node')
+ xmlutil.maybe_set(out, "id", self.try_match_initial_id() and self.matched(1))
+ self.match(_UNAME_RE, errmsg="Expected uname[:type]")
+ out.set("uname", self.matched(1))
+ if validator.node_type_optional():
+ xmlutil.maybe_set(out, "type", self.matched(3))
+ else:
+ out.set("type", self.matched(3) or constants.node_default_type)
+ xmlutil.maybe_set(out, "description", self.try_match_description())
+ self.match_arguments(out, {'attributes': 'instance_attributes',
+ 'utilization': 'utilization'},
+ implicit_initial='attributes')
+ return out
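+
+# Illustrative mapping (editor's example): the command
+#     node node1 attributes mem=16G
+# produces roughly
+#     <node uname="node1" type="normal">
+#       <instance_attributes><nvpair name="mem" value="16G"/></instance_attributes>
+#     </node>
+# (type is omitted on schemas where the attribute is optional).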
+
+
+@parser_for('primitive', 'group', 'clone', 'ms', 'master', 'rsc_template', 'bundle')
+class ResourceParser(BaseParser):
+ def match_ra_type(self, out):
+ "[<class>:[<provider>:]]<type>"
+ if not self.current_token():
+ self.err("Expected resource type")
+ cpt = validator.class_provider_type(self.current_token())
+ if not cpt:
+ self.err("Unknown resource type")
+ self.match_any()
+ xmlutil.maybe_set(out, 'class', cpt[0])
+ xmlutil.maybe_set(out, 'provider', cpt[1])
+ xmlutil.maybe_set(out, 'type', cpt[2])
+
+ def parse(self, cmd):
+ return self.begin_dispatch(cmd, min_args=2)
+
+ def _primitive_or_template(self):
+ """
+ primitive <rsc> {[<class>:[<provider>:]]<type>|@<template>}
+ [params attr_list]
+ [meta attr_list]
+ [utilization attr_list]
+ [operations id_spec]
+ [op op_type [<attribute>=<value> ...] ...]
+
+ attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val> ...] | $id-ref=<id>
+ id_spec :: $id=<id> | $id-ref=<id>
+ op_type :: start | stop | monitor
+ """
+ t = self.matched(0).lower()
+ if t == 'primitive':
+ out = xmlutil.new('primitive')
+ else:
+ out = xmlutil.new('template')
+ out.set('id', self.match_identifier())
+ if t == 'primitive' and self.try_match(_TEMPLATE_RE):
+ out.set('template', self.matched(1))
+ else:
+ self.match_ra_type(out)
+ xmlutil.maybe_set(out, 'description', self.try_match_description())
+ self.match_arguments(out, {'params': 'instance_attributes',
+ 'meta': 'meta_attributes',
+ 'utilization': 'utilization',
+ 'operations': 'operations',
+ 'op': 'op'}, implicit_initial='params')
+ return out
+
+ parse_primitive = _primitive_or_template
+ parse_rsc_template = _primitive_or_template
+
+ def _master_or_clone(self):
+ if self.matched(0).lower() == 'clone':
+ out = xmlutil.new('clone')
+ else:
+ out = xmlutil.new('master')
+ out.set('id', self.match_identifier())
+
+ child = xmlutil.new('crmsh-ref', id=self.match_resource())
+ xmlutil.maybe_set(out, 'description', self.try_match_description())
+ self.match_arguments(out, {'params': 'instance_attributes',
+ 'meta': 'meta_attributes'}, implicit_initial='params')
+ out.append(child)
+ return out
+
+ parse_master = _master_or_clone
+ parse_ms = _master_or_clone
+ parse_clone = _master_or_clone
+
+ def _try_group_resource(self):
+ t = self.current_token()
+ if (not t) or ('=' in t) or (t.lower() in ('params', 'meta')):
+ return None
+ return self.match_any()
+
+ def parse_group(self):
+ out = xmlutil.new('group')
+ out.set('id', self.match_identifier())
+ children = []
+ while self._try_group_resource():
+ child = self.lastmatch()
+ if child in children:
+ self.err("child %s listed more than once in group %s" %
+ (child, out.get('id')))
+ children.append(child)
+ xmlutil.maybe_set(out, 'description', self.try_match_description())
+ self.match_arguments(out, {'params': 'instance_attributes',
+ 'meta': 'meta_attributes'},
+ implicit_initial='params')
+ for child in children:
+ xmlutil.child(out, 'crmsh-ref', id=child)
+ return out
+
+ def parse_bundle(self):
+ out = xmlutil.new('bundle')
+ out.set('id', self.match_identifier())
+ xmlutil.maybe_set(out, 'description', self.try_match_description())
+ self.match_arguments(out, {'docker': 'docker',
+ 'rkt': 'rkt',
+ 'network': 'network',
+ 'port-mapping': 'port-mapping',
+ 'storage': 'storage',
+ 'meta': 'meta_attributes',
+ 'primitive': 'primitive'})
+ return out
+
+
+@parser_for('location', 'colocation', 'collocation', 'order', 'rsc_ticket')
+class ConstraintParser(BaseParser):
+ def parse(self, cmd):
+ return self.begin_dispatch(cmd, min_args=2)
+
+ def parse_location(self):
+ """
+ location <id> <rsc> [[$]<attribute>=<value>] <score>: <node>
+ location <id> <rsc> [[$]<attribute>=<value>] <rule> [<rule> ...]
+ rsc :: /<rsc-pattern>/
+ | { <rsc-set> }
+ | <rsc>
+ attribute :: role | resource-discovery
+ """
+ out = xmlutil.new('rsc_location', id=self.match_identifier())
+ if self.try_match('^/(.+)/$'):
+ out.set('rsc-pattern', self.matched(1))
+ elif self.try_match('{'):
+ tokens = self.match_until('}')
+ self.match('}')
+ if not tokens:
+ self.err("Empty resource set")
+ parser = ResourceSet('role', tokens, self)
+ for rscset in parser.parse():
+ out.append(rscset)
+ else:
+ out.set('rsc', self.match_resource())
+
+ while self.try_match(_ATTR_RE):
+ name = self.matched(1)
+ value = handle_role_for_ocf_1_1(self.matched(2), name=name)
+ out.set(name, value)
+
+ # possibly redundant now that the _ATTR_RE loop above already consumes role=...; kept for safety
+ if self.try_match(_ROLE_RE) or self.try_match(_ROLE2_RE):
+ out.set('role', handle_role_for_ocf_1_1(self.matched(1)))
+
+ score = False
+ if self.try_match(_SCORE_RE):
+ score = True
+ out.set(*self.validate_score(self.matched(1)))
+ out.set('node', self.match_identifier())
+ # backwards compatibility: role used to be read here
+ if out.get('role') is None:
+ if self.try_match(_ROLE_RE) or self.try_match(_ROLE2_RE):
+ out.set('role', handle_role_for_ocf_1_1(self.matched(1)))
+ if not score:
+ rules = self.match_rules()
+ out.extend(rules)
+ if not rules:
+ self.err("expected <score>: <node> or <rule> [<rule> ...]")
+ return out
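+
+# Illustrative mapping (editor's example):
+#     location loc-1 web-server 100: node1
+# becomes roughly
+#     <rsc_location id="loc-1" rsc="web-server" score="100" node="node1"/>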
+
+ def parse_colocation(self):
+ """
+ colocation <id> <score>: <rsc>[:<role>] <rsc>[:<role>] ...
+ [node-attribute=<node_attr>]
+ """
+ out = xmlutil.new('rsc_colocation', id=self.match_identifier())
+ self.match(_SCORE_RE, errmsg="Expected <score>:")
+ out.set(*self.validate_score(self.matched(1)))
+ if self.try_match_tail('node-attribute=(.+)$'):
+ out.set('node-attribute', self.matched(1).lower())
+ self.try_match_rscset(out, 'role')
+ return out
+
+ parse_collocation = parse_colocation
+
+ def parse_order(self):
+ '''
+ order <id> [kind] <rsc>[:<action>] <rsc>[:<action>] ...
+ [symmetrical=<bool>]
+
+ kind :: Mandatory | Optional | Serialize
+ '''
+ out = xmlutil.new('rsc_order', id=self.match_identifier())
+ if self.try_match('(%s):$' % ('|'.join(validator.rsc_order_kinds()))):
+ out.set('kind', validator.canonize(
+ self.matched(1), validator.rsc_order_kinds()))
+ elif self.try_match(_SCORE_RE):
+ out.set(*self.validate_score(self.matched(1), noattr=True, to_kind=True))
+ if self.try_match_tail('symmetrical=(true|false|yes|no|on|off)$'):
+ out.set('symmetrical', canonical_boolean(self.matched(1)))
+ self.try_match_rscset(out, 'action')
+ return out
+
+ def parse_rsc_ticket(self):
+ '''
+ rsc_ticket <id> <ticket_id>: <rsc>[:<role>] [<rsc>[:<role>] ...]
+ [loss-policy=<loss_policy_action>]
+
+ loss_policy_action :: stop | demote | fence | freeze
+ '''
+ out = xmlutil.new('rsc_ticket', id=self.match_identifier())
+ self.match(_SCORE_RE, errmsg="Expected <ticket-id>:")
+ out.set('ticket', self.matched(1))
+ if self.try_match_tail('loss-policy=(stop|demote|fence|freeze)$'):
+ out.set('loss-policy', self.matched(1))
+ self.try_match_rscset(out, 'role', simple_count=1)
+ return out
+
+ def try_match_rscset(self, out, suffix_type, simple_count=2):
+ simple, resources = self.match_resource_set(suffix_type, simple_count=simple_count)
+ if simple:
+ for n, v in resources:
+ out.set(n, v)
+ elif resources:
+ for rscset in resources:
+ out.append(rscset)
+ else:
+ def repeat(v, n):
+ for _ in range(0, n):
+ yield v
+ self.err("Expected %s | resource_sets" %
+ " ".join(repeat("<rsc>[:<%s>]" % (suffix_type), simple_count)))
+
+ def try_match_tail(self, rx):
+ "ugly hack to prematurely extract a tail attribute"
+ pos = self._currtok
+ self._currtok = len(self._cmd) - 1
+ ret = self.try_match(rx)
+ if ret:
+ self._cmd = self._cmd[:-1]
+ self._currtok = pos
+ return ret
+
+ def remaining_tokens(self):
+ return len(self._cmd) - self._currtok
+
+ def match_resource_set(self, suffix_type, simple_count=2):
+ simple = False
+ if self.remaining_tokens() == simple_count:
+ simple = True
+ if suffix_type == 'role':
+ return True, self.match_simple_role_set(simple_count)
+ else:
+ return True, self.match_simple_action_set()
+ tokens = self.match_rest()
+ parser = ResourceSet(suffix_type, tokens, self)
+ return simple, parser.parse()
+
+ def _fmt(self, info, name):
+ if info[1]:
+ return [[name, info[0]], [name + '-' + info[2], info[1]]]
+ return [[name, info[0]]]
+
+ def _split_setref(self, typename, classifier):
+ rsc, typ = self.match_split()
+ typ, t = classifier(handle_role_for_ocf_1_1(typ, name=typename))
+ if typ and not t:
+ self.err("Invalid %s '%s' for '%s'" % (typename, typ, rsc))
+ return rsc, typ, t
+
+ def match_simple_role_set(self, count):
+ ret = self._fmt(self._split_setref('role', validator.classify_role), 'rsc')
+ if count == 2:
+ ret += self._fmt(self._split_setref('role', validator.classify_role), 'with-rsc')
+ return ret
+
+ def match_simple_action_set(self):
+ ret = self._fmt(self._split_setref('action', validator.classify_action), 'first')
+ return ret + self._fmt(self._split_setref('action', validator.classify_action), 'then')
+
+
+@parser_for('monitor')
+class OpParser(BaseParser):
+ def parse(self, cmd):
+ return self.begin_dispatch(cmd, min_args=2)
+
+ def parse_monitor(self):
+ out = xmlutil.new('op', name="monitor")
+ resource, role = self.match_split()
+ if role:
+ role, role_class = validator.classify_role(role)
+ if not role_class:
+ self.err("Invalid role '%s' for resource '%s'" % (role, resource))
+ out.set(role_class, role)
+ out.set('rsc', resource)
+ interval, timeout = self.match_split()
+ xmlutil.maybe_set(out, 'interval', interval)
+ xmlutil.maybe_set(out, 'timeout', timeout)
+ return out
+
+
+@parser_for('property', 'rsc_defaults', 'op_defaults')
+def property_parser(self, cmd):
+ """
+ property = <cluster_property_set>...</>
+ rsc_defaults = <rsc_defaults><meta_attributes>...</></>
+ op_defaults = <op_defaults><meta_attributes>...</></>
+ """
+ from .cibconfig import cib_factory
+
+ setmap = {'property': 'cluster_property_set',
+ 'rsc_defaults': 'meta_attributes',
+ 'op_defaults': 'meta_attributes'}
+ self.begin(cmd, min_args=1)
+ self.match('(%s)$' % '|'.join(self.can_parse))
+ if self.matched(1) in constants.defaults_tags:
+ root = xmlutil.new(self.matched(1))
+ attrs = xmlutil.child(root, setmap[self.matched(1)])
+ else: # property -> cluster_property_set
+ root = xmlutil.new(setmap[self.matched(1)])
+ attrs = root
+ if self.try_match_initial_id():
+ attrs.set('id', self.matched(1))
+ elif self.try_match_idspec():
+ idkey = self.matched(1)[1:]
+ idval = self.matched(2)
+ if idkey == 'id-ref':
+ idval = cib_factory.resolve_id_ref(attrs.tag, idval)
+ attrs.set(idkey, idval)
+ for rule in self.match_rules():
+ attrs.append(rule)
+ if self.ignore_empty:
+ res_list = self.match_nvpairs(minpairs=0)
+ else:
+ res_list = self.match_nvpairs(terminator=[], minpairs=0, allow_empty=False)
+ for nvp in res_list:
+ attrs.append(nvp)
+ return root
+
+
+@parser_for('fencing-topology', 'fencing_topology')
+class FencingOrderParser(BaseParser):
+ '''
+ <fencing-topology>
+ <fencing-level id=<id> target=<text> index=<+int> devices=r"\\w,\\w..."/>
+ </fencing-topology>
+
+ new:
+
+ from 1.1.14 on, target can be a node attribute value mapping:
+
+ attr:<name>=<value> maps to XML:
+
+ <fencing-topology>
+ <fencing-level id=<id> target-attribute=<text> target-value=<text>
+ index=<+int> devices=r"\\w,\\w..."/>
+ </fencing-topology>
+
+ from 1.1.14 on, target can be a regexp pattern:
+
+ pattern:<pattern> maps to XML:
+
+ <fencing-topology>
+ <fencing-level id=<id> target-pattern=<pattern>
+ index=<+int> devices=r"\\w,\\w..."/>
+ </fencing-topology>
+
+ fencing-topology \
+ pcmk-1: poison-pill power \
+ pcmk-2: disk,network power
+
+ '''
+ def parse(self, cmd):
+ self.begin(cmd)
+ if not self.try_match("fencing-topology"):
+ self.match("fencing_topology")
+ target = "@@"
+ # (target, devices)
+ raw_levels = []
+ while self.has_tokens():
+ if self.try_match(_TARGET_ATTR_RE):
+ target = (self.matched(1), self.matched(2))
+ elif self.try_match(_TARGET_PATTERN_RE):
+ target = (None, self.matched(1))
+ elif self.try_match(_TARGET_RE):
+ target = self.matched(1)
+ else:
+ raw_levels.append((target, self.match_any()))
+ return self._postprocess_levels(raw_levels)
+
+ def _postprocess_levels(self, raw_levels):
+ from collections import defaultdict
+ from itertools import repeat
+ from .cibconfig import cib_factory
+ if len(raw_levels) == 0:
+ def no_levels():
+ return []
+ lvl_generator = no_levels
+ elif raw_levels[0][0] == "@@":
+ def node_levels():
+ for node in cib_factory.node_id_list():
+ for target, devices in raw_levels:
+ yield node, devices
+ lvl_generator = node_levels
+ else:
+ def wrap_levels():
+ return raw_levels
+ lvl_generator = wrap_levels
+
+ out = xmlutil.new('fencing-topology')
+ targets = defaultdict(repeat(1).__next__)
+ for target, devices in lvl_generator():
+ if isinstance(target, tuple):
+ if target[0] is None:
+ # pattern
+ c = xmlutil.child(out, 'fencing-level',
+ index=str(targets[target[1]]),
+ devices=devices)
+ c.set('target-pattern', target[1])
+ targets[target[1]] += 1
+ else:
+ c = xmlutil.child(out, 'fencing-level',
+ index=str(targets[target[0]]),
+ devices=devices)
+ c.set('target-attribute', target[0])
+ c.set('target-value', target[1])
+ targets[target[0]] += 1
+ else:
+ xmlutil.child(out, 'fencing-level',
+ target=target,
+ index=str(targets[target]),
+ devices=devices)
+ targets[target] += 1
+
+ return out
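+
+# The docstring example "fencing-topology pcmk-1: poison-pill power" therefore
+# yields roughly (editor's illustration):
+#     <fencing-topology>
+#       <fencing-level target="pcmk-1" index="1" devices="poison-pill"/>
+#       <fencing-level target="pcmk-1" index="2" devices="power"/>
+#     </fencing-topology>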
+
+
+@parser_for('tag')
+def parse_tag(self, cmd):
+ """
+ <tag id=id>
+ <obj_ref id=rsc/>
+ ...
+ </tag>
+ """
+ self.begin(cmd, min_args=2)
+ self.match('tag')
+ self.match(_TAG_RE, errmsg="Expected tag name")
+ out = xmlutil.new('tag', id=self.matched(1))
+ while self.has_tokens():
+ e = xmlutil.new('obj_ref', id=self.match_resource())
+ out.append(e)
+ if len(out) == 0:
+ self.err("Expected at least one resource")
+ return out
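+
+# For instance (editor's example), "tag databases db1 db2" yields
+#     <tag id="databases"><obj_ref id="db1"/><obj_ref id="db2"/></tag>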
+
+
+@parser_for('user', 'role', 'acl_target', 'acl_group')
+class AclParser(BaseParser):
+ def parse(self, cmd):
+ return self.begin_dispatch(cmd, min_args=2)
+
+ def parse_user(self):
+ out = xmlutil.new('acl_user')
+ out.set('id', self.match_identifier())
+ while self.has_tokens():
+ # role identifier
+ if self.try_match(_ROLE_REF_RE):
+ xmlutil.child(out, 'role_ref', id=self.matched(1))
+ # acl right rule
+ else:
+ out.append(self._add_rule())
+ return out
+
+ def parse_acl_target(self):
+ out = xmlutil.new('acl_target')
+ out.set('id', self.match_identifier())
+ while self.has_tokens():
+ xmlutil.child(out, 'role', id=self.match_identifier())
+ return out
+
+ def parse_acl_group(self):
+ out = xmlutil.new('acl_group')
+ out.set('id', self.match_identifier())
+ while self.has_tokens():
+ xmlutil.child(out, 'role', id=self.match_identifier())
+ return out
+
+ def parse_role(self):
+ out = xmlutil.new('acl_role')
+ out.set('id', self.match_identifier())
+
+ if validator.acl_2_0():
+ xmlutil.maybe_set(out, "description", self.try_match_description())
+ while self.has_tokens():
+ self._add_permission(out)
+ else:
+ while self.has_tokens():
+ out.append(self._add_rule())
+ return out
+
+ def _is_permission(self, val):
+ def permission(x):
+ return x in constants.acl_spec_map_2 or x in constants.acl_shortcuts
+ x = val.split(':', 1)
+ return len(x) > 0 and permission(x[0])
+
+ def _add_permission(self, out):
+ rule = xmlutil.new('acl_permission')
+ rule.set('kind', self.match(_ACL_RIGHT_RE).lower())
+ if self.try_match_initial_id():
+ rule.set('id', self.matched(1))
+ xmlutil.maybe_set(rule, "description", self.try_match_description())
+
+ attributes = {}
+
+ while self.has_tokens():
+ if not self._is_permission(self.current_token()):
+ break
+ self.match(_PERM_RE, errmsg="Expected <type>:<spec>")
+ typ = self.matched(1)
+ typ = constants.acl_spec_map_2.get(typ, typ)
+ val = self.matched(2)
+ if typ in constants.acl_shortcuts:
+ typ, val = self._expand_shortcuts_2(typ, val)
+ elif val is None:
+ self.err("Expected <type>:<spec>")
+ attributes[typ] = val
+ # valid combinations of rule attributes:
+ # xpath
+ # reference
+ # object-type + attribute
+ # split other combinations here
+ from copy import deepcopy
+ if 'xpath' in attributes:
+ rule2 = deepcopy(rule)
+ rule2.set('xpath', attributes['xpath'])
+ out.append(rule2)
+ if 'reference' in attributes:
+ rule2 = deepcopy(rule)
+ rule2.set('reference', attributes['reference'])
+ out.append(rule2)
+ if 'object-type' in attributes:
+ rule2 = deepcopy(rule)
+ rule2.set('object-type', attributes['object-type'])
+ if 'attribute' in attributes:
+ rule2.set('attribute', attributes['attribute'])
+ out.append(rule2)
+ if 'attribute' in attributes and 'object-type' not in attributes:
+ self.err("attribute is only valid in combination with tag/object-type")
+
+ def _add_rule(self):
+ rule = xmlutil.new(self.match(_ACL_RIGHT_RE).lower())
+ eligible_specs = list(constants.acl_spec_map.values())
+ while self.has_tokens():
+ a = self._expand_shortcuts(self.current_token().split(':', 1))
+ if len(a) != 2 or a[0] not in eligible_specs:
+ break
+ self.match_any()
+ rule.set(a[0], a[1])
+ if self._remove_spec(eligible_specs, a[0]):
+ break
+ return rule
+
+ def _remove_spec(self, speclist, spec):
+ """
+ Remove spec from list of eligible specs.
+ Returns true if spec parse is complete.
+ """
+ try:
+ speclist.remove(spec)
+ if spec == 'xpath':
+ speclist.remove('ref')
+ speclist.remove('tag')
+ elif spec in ('ref', 'tag'):
+ speclist.remove('xpath')
+ else:
+ return True
+ except ValueError:
+ pass
+ return False
+
+ def _remove_spec_2(self, speclist, spec):
+ """
+ Remove spec from list of eligible specs.
+ Returns true if spec parse is complete.
+ """
+ try:
+ speclist.remove(spec)
+ if spec == 'xpath':
+ speclist.remove('reference')
+ speclist.remove('object-type')
+ elif spec in ('reference', 'object-type'):
+ speclist.remove('xpath')
+ else:
+ return True
+ except ValueError:
+ pass
+ return False
+
+ def _expand_shortcuts_2(self, typ, val):
+ '''
+ expand xpath shortcuts: the typ prefix names the shortcut
+ '''
+ expansion = constants.acl_shortcuts[typ]
+ if val is None:
+ if '@@' in expansion[0]:
+ self.err("Missing argument to ACL shortcut %s" % (typ))
+ return 'xpath', expansion[0]
+ a = val.split(':')
+ xpath = ""
+ exp_i = 0
+ for tok in a:
+ try:
+ # some expansions may contain no id placeholders
+ # of course, they don't consume input tokens
+ if '@@' not in expansion[exp_i]:
+ xpath += expansion[exp_i]
+ exp_i += 1
+ xpath += expansion[exp_i].replace('@@', tok)
+ exp_i += 1
+ except IndexError:
+ return []
+ # need to remove backslash chars which were there to escape
+ # special characters in expansions when used as regular
+ # expressions (mainly '[]')
+ val = xpath.replace("\\", "")
+ return 'xpath', val
+
+ def _expand_shortcuts(self, l):
+ '''
+ Expand xpath shortcuts. The input list l contains the user
+ input. If no shortcut was found, just return l.
+ In case of syntax error, return empty list. Otherwise, l[0]
+ contains 'xpath' and l[1] the expansion as found in
+ constants.acl_shortcuts. The id placeholders '@@' are replaced
+ with the given attribute names or resource references.
+ '''
+ try:
+ expansion = constants.acl_shortcuts[l[0]]
+ except KeyError:
+ return l
+ l[0] = "xpath"
+ if len(l) == 1:
+ if '@@' in expansion[0]:
+ return []
+ l.append(expansion[0])
+ return l
+ a = l[1].split(':')
+ xpath = ""
+ exp_i = 0
+ for tok in a:
+ try:
+ # some expansions may contain no id placeholders
+ # of course, they don't consume input tokens
+ if '@@' not in expansion[exp_i]:
+ xpath += expansion[exp_i]
+ exp_i += 1
+ xpath += expansion[exp_i].replace('@@', tok)
+ exp_i += 1
+ except IndexError:
+ return []
+ # need to remove backslash chars which were there to escape
+ # special characters in expansions when used as regular
+ # expressions (mainly '[]')
+ l[1] = xpath.replace("\\", "")
+ return l
+
+
+@parser_for('xml')
+def parse_xml(self, cmd):
+ self.begin(cmd, min_args=1)
+ self.match('xml')
+ if not self.has_tokens():
+ self.err("Expected XML data")
+ xml_data = ' '.join(self.match_rest())
+ # strip spaces between elements
+ # they produce text elements
+ try:
+ e = etree.fromstring(xml_data)
+ except Exception as err:
+ logger.error("Cannot parse XML data: %s" % xml_data)
+ self.err(err)
+ if e.tag not in constants.cib_cli_map:
+ self.err("Element %s not recognized" % (e.tag))
+ return e
+
+
+@parser_for('alert')
+def parse_alert(self, cmd):
+ """
+ <alerts>
+ <alert id=ID path=PATH>
+ <recipient id=RID value=VALUE/>
+ <meta_attributes ..>
+ <instance_attributes ..>
+ ...
+
+ meta attributes "timeout", "tstamp_format"
+ </alert>
+ </alerts>
+
+ alert ID PATH [attributes ...] [meta ...] [to [{] recipient [}] ...]
+ recipient :: PATH [attributes ...] [meta ...]
+ """
+ self.begin(cmd, min_args=2)
+ self.match('alert')
+ alertid = self.match_identifier()
+ path = self.match(_ALERT_PATH_RE, errmsg="Expected path")
+ out = xmlutil.new('alert', id=alertid, path=path)
+ desc = self.try_match_description()
+ if desc is not None:
+ out.attrib['description'] = desc
+ rcount = 1
+ root_selector = [None]
+
+ def wrap_select(tag):
+ if tag[0] is None:
+ tag[0] = xmlutil.child(out, 'select')
+ return tag[0]
+
+ while self.has_tokens():
+ if self.current_token() in ('attributes', 'meta'):
+ self.match_arguments(out, {'attributes': 'instance_attributes',
+ 'meta': 'meta_attributes'},
+ terminator=['attributes', 'meta', 'to', 'select'])
+ continue
+ if self.current_token() == 'select':
+ selector_types = ('nodes', 'fencing', 'resources', 'attributes')
+ self.match('select')
+ root_selector[0] = None
+ while self.current_token() in selector_types:
+ selector = self.match_identifier()
+ if selector == 'attributes':
+ if not self.try_match('{'):
+ self.rewind()
+ break
+ seltag = xmlutil.child(wrap_select(root_selector), 'select_{}'.format(selector))
+ if selector == 'attributes':
+ while self.current_token() != '}':
+ name = self.match_identifier()
+ xmlutil.child(seltag, 'attribute', name=name)
+ self.match('}')
+ continue
+ self.match('to')
+ rid = '%s-recipient-%s' % (alertid, rcount)
+ rcount += 1
+ bracer = self.try_match('{')
+ elem = xmlutil.new('recipient', id=rid, value=self.match_any())
+ desc = self.try_match_description()
+ terminators = ['attributes', 'meta', 'to']
+ if bracer:
+ terminators.append('}')
+ if desc is not None:
+ elem.attrib['description'] = desc
+ self.match_arguments(elem, {'attributes': 'instance_attributes',
+ 'meta': 'meta_attributes'},
+ terminator=terminators)
+ if bracer:
+ self.match('}')
+ out.append(elem)
+ return out
+
+
+class ResourceSet(object):
+ '''
+ Constraint resource set parser. Parses constructs like:
+ a ( b c:start ) d:Master e ...
+ Appends one or more resource sets to cli_list.
+ Resource sets are in form:
+ <resource_set [sequential=false] [require-all=false] [action=<action>] [role=<role>]>
+ <resource_ref id="<rsc>"/>
+ ...
+ </resource_set>
+ Action/role change makes a new resource set.
+ '''
+ open_set = ('(', '[')
+ close_set = (')', ']')
+ matching = {
+ '[': ']',
+ '(': ')',
+ }
+
+ def __init__(self, q_attr, s, parent):
+ self.parent = parent
+ self.q_attr = q_attr
+ self.tokens = s
+ self.cli_list = []
+ self.reset_set()
+ self.opened = ''
+ self.sequential = True
+ self.require_all = True
+ self.fix_parentheses()
+
+ def fix_parentheses(self):
+ newtoks = []
+ for p in self.tokens:
+ if p[0] in self.open_set and len(p) > 1:
+ newtoks.append(p[0])
+ newtoks.append(p[1:])
+ elif p[len(p)-1] in self.close_set and len(p) > 1:
+ newtoks.append(p[0:len(p)-1])
+ newtoks.append(p[len(p)-1])
+ else:
+ newtoks.append(p)
+ self.tokens = newtoks
+
+ def reset_set(self):
+ self.set_pl = xmlutil.new("resource_set")
+ self.prev_q = '' # previous qualifier (action or role)
+ self.curr_attr = '' # attribute (action or role)
+
+ def save_set(self):
+ if not len(self.set_pl):
+ return
+ if not self.require_all:
+ self.set_pl.set("require-all", "false")
+ if not self.sequential:
+ self.set_pl.set("sequential", "false")
+ if self.curr_attr:
+ self.set_pl.set(self.curr_attr, self.prev_q)
+ self.make_resource_set()
+ self.reset_set()
+
+ def make_resource_set(self):
+ self.cli_list.append(self.set_pl)
+
+ def parseattr(self, p, tokpos):
+ attrs = {"sequential": "sequential",
+ "require-all": "require_all"}
+ l = p.split('=')
+ if len(l) != 2:
+ self.err('Extra = in %s' % (p),
+ token=self.tokens[tokpos])
+ if l[0] not in attrs:
+ self.err('Unknown attribute',
+ token=self.tokens[tokpos])
+ k, v = l
+ if not verify_boolean(v):
+ self.err('Not a boolean: %s' % (v),
+ token=self.tokens[tokpos])
+ setattr(self, attrs[k], get_boolean(v))
+ return True
+
+ def splitrsc(self, p):
+ l = p.split(':')
+ if len(l) == 2:
+ if self.q_attr == 'action':
+ l[1] = validator.canonize(
+ l[1],
+ validator.resource_actions())
+ else:
+ l[1] = validator.canonize(
+ handle_role_for_ocf_1_1(l[1]),
+ validator.resource_roles())
+ if not l[1]:
+ self.err('Invalid %s for %s' % (self.q_attr, p))
+ elif len(l) == 1:
+ l = [p, '']
+ return l
+
+ def err(self, errmsg, token=''):
+ self.parent.err(msg=errmsg, context=self.q_attr, token=token)
+
+ def update_attrs(self, bracket, tokpos):
+ if bracket in ('(', '['):
+ if self.opened:
+ self.err('Cannot nest resource sets',
+ token=self.tokens[tokpos])
+ self.sequential = False
+ if bracket == '[':
+ self.require_all = False
+ self.opened = bracket
+ elif bracket in (')', ']'):
+ if not self.opened:
+ self.err('Unmatched closing bracket',
+ token=self.tokens[tokpos])
+ if bracket != self.matching[self.opened]:
+ self.err('Mismatched closing bracket',
+ token=self.tokens[tokpos])
+ self.sequential = True
+ self.require_all = True
+ self.opened = ''
+
+ def parse(self):
+ tokpos = -1
+ for p in self.tokens:
+ tokpos += 1
+ if p == "_rsc_set_":
+ continue # a degenerate resource set
+ if p in self.open_set:
+ self.save_set()
+ self.update_attrs(p, tokpos)
+ continue
+ if p in self.close_set:
+ # empty sets not allowed
+ if not len(self.set_pl):
+ self.err('Empty resource set',
+ token=self.tokens[tokpos])
+ self.save_set()
+ self.update_attrs(p, tokpos)
+ continue
+ if '=' in p:
+ self.parseattr(p, tokpos)
+ continue
+ rsc, q = self.splitrsc(p)
+ if q != self.prev_q: # one set can't have different roles/actions
+ self.save_set()
+ self.prev_q = q
+ if q:
+ if not self.curr_attr:
+ self.curr_attr = self.q_attr
+ else:
+ self.curr_attr = ''
+ self.set_pl.append(xmlutil.new("resource_ref", id=rsc))
+ if self.opened: # no close
+ self.err('Unmatched opening bracket',
+ token=self.tokens[tokpos])
+ if len(self.set_pl): # save the final set
+ self.save_set()
+ ret = self.cli_list
+ self.cli_list = []
+ return ret
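+
+# Illustrative parse (editor's example): the token stream
+#     a ( b c ) d:Master
+# yields three resource_set elements: one holding a, one holding b and c with
+# sequential="false", and one holding d with role="Master" (assuming the
+# schema accepts that role; OCF 1.1 schemas map it to Promoted).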
+
+
+def parse(s, comments=None, ignore_empty=True, complete_advised=False):
+ '''
+ Input: a list of tokens (or a CLI format string).
+ Returns: a CIB object (an lxml element).
+ Returns False on error, and None when the input is only a comment.
+ comments holds comment state between parses.
+ Handles basic normalization of the input:
+ encodes unicode safely to ASCII, tokenizes the
+ string, and dispatches on the first token.
+ '''
+ if comments is None:
+ comments = []
+
+ if isinstance(s, str):
+ try:
+ s = s.encode('ascii', errors='xmlcharrefreplace')
+ s = s.decode('utf-8')
+ except Exception as e:
+ logger.error(e)
+ return False
+ if isinstance(s, str):
+ if s and s.startswith('#'):
+ comments.append(s)
+ return None
+ if s.startswith('xml'):
+ try:
+ s = [x for p in lines2cli(s) for x in p.split()]
+ except ValueError as e:
+ logger.error(e)
+ return False
+ else:
+ s = shlex.split(s)
+            # shlex.split() should not produce newline tokens, but drop any that slip through
+ while '\n' in s:
+ s.remove('\n')
+ if s:
+ s[0] = s[0].lower()
+ if not s:
+ return s
+ kw = s[0]
+ parser = _parsers.get(kw)
+ if parser is None:
+ logger_utils.syntax_err(s, token=s[0], msg="Unknown command")
+ return False
+
+ try:
+ ret = parser.do_parse(s, ignore_empty, complete_advised)
+ if ret is not None and len(comments) > 0:
+ if ret.tag in constants.defaults_tags:
+ xmlutil.stuff_comments(ret[0], comments)
+ else:
+ xmlutil.stuff_comments(ret, comments)
+ del comments[:]
+ return ret
+ except ParseError:
+ return False
+
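+# Example (illustrative):
+#     parse("primitive p1 ocf:heartbeat:Dummy") returns the lxml element
+#     for the primitive, while parse("# note", comments) returns None and
+#     appends the comment text to the comments list.
+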
+
+def score_to_kind(score):
+ """
+ Convert score to kind for rsc_order
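+
+    Example (illustrative):
+        score_to_kind("0")        -> "Optional"
+        score_to_kind("INFINITY") -> "Mandatory"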
+ """
+ return "Optional" if score == "0" else "Mandatory"
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/prun/__init__.py b/crmsh/prun/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crmsh/prun/__init__.py
diff --git a/crmsh/prun/prun.py b/crmsh/prun/prun.py
new file mode 100644
index 0000000..a675431
--- /dev/null
+++ b/crmsh/prun/prun.py
@@ -0,0 +1,283 @@
+# prun.py - run command or copy files on multiple hosts concurrently
+import os
+import socket
+import tempfile
+import typing
+
+import crmsh.constants
+import crmsh.userdir
+from crmsh.prun.runner import Task, Runner
+from crmsh.user_of_host import UserOfHost
+from crmsh.sh import Utils
+
+_DEFAULT_CONCURRENCY = 32
+
+
+class ProcessResult:
+ def __init__(self, returncode: int, stdout: bytes, stderr: bytes):
+ self.returncode = returncode
+ self.stdout = stdout
+ self.stderr = stderr
+
+
+class PRunError(Exception):
+ """Base exception class for all error in prun module."""
+ def __init__(self, user, host, *args):
+ super().__init__(*args)
+ self.user = user
+ self.host = host
+
+
+class SSHError(PRunError):
+ def __init__(self, user, host, msg):
+ super().__init__(user, host, f"Cannot create SSH connection to {user}@{host}: {msg}")
+
+
+class TimeOutError(PRunError):
+ def __init__(self, user, host):
+ super().__init__(user, host, f"Timed out on {user}@{host}.")
+
+
+class PRunInterceptor:
+ def task(self, task: Task) -> Task:
+ return task
+ def result(self, result: ProcessResult) -> ProcessResult:
+ return result
+ def exception(self, exc: PRunError) -> PRunError:
+ return exc
+
+
+def prun(
+ host_cmdline: typing.Mapping[str, str],
+ *,
+ timeout_seconds: int = -1,
+ concurrency: int = _DEFAULT_CONCURRENCY,
+ interceptor: PRunInterceptor = PRunInterceptor(),
+) -> typing.Dict[str, typing.Union[ProcessResult, SSHError]]:
+ """Run a command on multiple hosts concurrently.
+
+ Args:
+ host_cmdline: A mapping from hosts to command lines to be run on that host.
+ timeout_seconds: (optional) The maximum number of seconds to wait for all the commands to complete.
+ concurrency: (optional) The maximum number of commands to be run concurrently.
+ interceptor: (optional) An interceptor that can modify the inputs of tasks before they are run,
+ and the results after they are finished.
+
+ Returns:
+ A mapping from the host to the results of the command run on that host.
+ """
+ tasks = [_build_run_task(host, cmdline) for host, cmdline in host_cmdline.items()]
+ runner = Runner(concurrency)
+ for task in tasks:
+ task = interceptor.task(task)
+ runner.add_task(task)
+ runner.run(timeout_seconds)
+ return {task.context['host']: _handle_run_result(task, interceptor) for task in tasks}
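+
+
+# Example usage (illustrative; host names are assumptions):
+#
+#     results = prun({"node1": "uname -r", "node2": "uname -r"})
+#     for host, result in results.items():
+#         if isinstance(result, ProcessResult):
+#             print(host, result.returncode, result.stdout.decode('utf-8'))
+#         else:  # SSHError
+#             print(host, "failed:", result)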
+
+
+def prun_multimap(
+ host_cmdline: typing.Sequence[typing.Tuple[str, str]],
+ *,
+ concurrency: int = _DEFAULT_CONCURRENCY,
+ timeout_seconds: int = -1,
+ interceptor: PRunInterceptor = PRunInterceptor(),
+) -> typing.Sequence[typing.Tuple[str, typing.Union[ProcessResult, SSHError]]]:
+ """A varient of prun that allow run multiple commands on the same host."""
+ tasks = [_build_run_task(host, cmdline) for host, cmdline in host_cmdline]
+ runner = Runner(concurrency)
+ for task in tasks:
+ task = interceptor.task(task)
+ runner.add_task(task)
+ runner.run(timeout_seconds)
+ return [
+ (task.context['host'], _handle_run_result(task, interceptor))
+ for task in tasks
+ ]
+
+
+def _build_run_task(remote: str, cmdline: str) -> Task:
+ if _is_local_host(remote):
+ if 0 == os.geteuid():
+ args = ['/bin/sh']
+ remote_sudoer = 'root'
+ else:
+ remote_sudoer = crmsh.userdir.get_sudoer()
+ if remote_sudoer == crmsh.userdir.getuser():
+ args = ['sudo', '/bin/sh']
+ else:
+ raise AssertionError('trying to run sudo as a non-root user')
+ return Task(
+ args,
+ cmdline.encode('utf-8'),
+ stdout=Task.Capture,
+ stderr=Task.Capture,
+ context={"host": remote, "ssh_user": remote_sudoer},
+ )
+ else:
+ local_sudoer, remote_sudoer = UserOfHost.instance().user_pair_for_ssh(remote)
+ shell = 'ssh {} {}@{} sudo -H /bin/sh'.format(crmsh.constants.SSH_OPTION, remote_sudoer, remote)
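+        # The resulting command looks like (illustrative; the exact options
+        # come from crmsh.constants.SSH_OPTION):
+        #     ssh <options> <remote_sudoer>@<remote> sudo -H /bin/sh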
+ if local_sudoer == crmsh.userdir.getuser():
+ args = ['/bin/sh', '-c', shell]
+ elif os.geteuid() == 0:
+ args = ['su', local_sudoer, '--login', '-c', shell]
+ else:
+ raise AssertionError('trying to run su as a non-root user')
+ return Task(
+ args,
+ cmdline.encode('utf-8'),
+ stdout=Task.Capture,
+ stderr=Task.Capture,
+ context={"host": remote, "ssh_user": remote_sudoer},
+ )
+
+
+def _handle_run_result(task: Task, interceptor: PRunInterceptor = PRunInterceptor()):
+ if task.returncode is None:
+ return interceptor.exception(TimeOutError(task.context['ssh_user'], task.context['host']))
+ elif task.returncode == 255:
+ return interceptor.exception(SSHError(task.context['ssh_user'], task.context['host'], Utils.decode_str(task.stderr)))
+ else:
+ return interceptor.result(ProcessResult(task.returncode, task.stdout, task.stderr))
+
+
+def pcopy_to_remote(
+ src: str,
+ hosts: typing.Sequence[str], dst: str,
+ recursive: bool = False,
+ *,
+ timeout_seconds: int = -1,
+ concurrency: int = _DEFAULT_CONCURRENCY,
+) -> typing.Dict[str, typing.Optional[PRunError]]:
+ """Copy file or directory from local to remote hosts concurrently."""
+ if src == dst:
+        # copying a file onto itself would corrupt the data, so skip local hosts
+ hosts_filtered = [x for x in hosts if not _is_local_host(x)]
+ if hosts_filtered:
+ hosts = hosts_filtered
+ else:
+ return {x: None for x in hosts}
+ flags = '-pr' if recursive else '-p'
+ local_sudoer, _ = UserOfHost.instance().user_pair_for_ssh(hosts[0])
+ script = "put {} '{}' '{}'\n".format(flags, src, dst)
+ ssh = None
+ try:
+        # sftp -S does not parse arguments; it accepts only a single executable, so we create one.
+ if local_sudoer == crmsh.userdir.getuser():
+ tasks = [_build_copy_task('', script, host) for host in hosts]
+ else:
+ ssh = tempfile.NamedTemporaryFile('w', encoding='utf-8', delete=False)
+ os.fchmod(ssh.fileno(), 0o700)
+ ssh.write(f'''#!/bin/sh
+exec sudo -u {local_sudoer} ssh "$@"''')
+            # It is necessary to close the file before executing it, or exec will fail with ETXTBSY.
+ ssh.close()
+ tasks = [_build_copy_task("-S '{}'".format(ssh.name), script, host) for host in hosts]
+ runner = Runner(concurrency)
+ for task in tasks:
+ runner.add_task(task)
+ runner.run(timeout_seconds)
+ finally:
+ if ssh is not None:
+ os.unlink(ssh.name)
+ ssh.close()
+ return {task.context['host']: _parse_copy_result(task) for task in tasks}
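+
+
+# Example usage (illustrative; host names are assumptions):
+#
+#     errors = pcopy_to_remote("/etc/corosync/corosync.conf",
+#                              ["node1", "node2"], "/etc/corosync/corosync.conf")
+#     failed = {host: err for host, err in errors.items() if err is not None}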
+
+
+def _build_copy_task(ssh: str, script: str, host: str):
+ _, remote_sudoer = UserOfHost.instance().user_pair_for_ssh(host)
+ cmd = "sftp {} {} -o BatchMode=yes -s 'sudo PATH=/usr/lib/ssh:/usr/libexec/ssh /bin/sh -c \"exec sftp-server\"' -b - {}@{}".format(
+ ssh,
+ crmsh.constants.SSH_OPTION,
+ remote_sudoer, _enclose_inet6_addr(host),
+ )
+ return Task(
+ ['/bin/sh', '-c', cmd],
+ input=script.encode('utf-8'),
+ stdout=Task.Capture,
+ stderr=Task.Stdout,
+ context={"host": host, "ssh_user": remote_sudoer},
+ )
+
+
+def _parse_copy_result(task: Task) -> typing.Optional[PRunError]:
+ if task.returncode == 0:
+ return None
+ elif task.returncode == 255:
+ return SSHError(task.context['ssh_user'], task.context['host'], Utils.decode_str(task.stdout))
+ else:
+ return PRunError(task.context['ssh_user'], task.context['host'], Utils.decode_str(task.stdout))
+
+
+def pfetch_from_remote(
+ hosts: typing.Sequence[str], src: str,
+ dst: str,
+ recursive=False,
+ *,
+ concurrency: int = _DEFAULT_CONCURRENCY,
+) -> typing.Dict[str, typing.Union[str, PRunError]]:
+ """Copy files from remote hosts to local concurrently.
+
+ Files are copied to directory <dst>/<host>/ corresponding to each source host."""
+ flags = '-pR' if recursive else '-p'
+ local_sudoer, _ = UserOfHost.instance().user_pair_for_ssh(hosts[0])
+ ssh = None
+ try:
+ if local_sudoer == crmsh.userdir.getuser():
+ tasks = [_build_fetch_task('', host, src, dst, flags) for host in hosts]
+ else:
+ ssh = tempfile.NamedTemporaryFile('w', encoding='utf-8', delete=False)
+ os.fchmod(ssh.fileno(), 0o700)
+ ssh.write(f'''#!/bin/sh
+ exec sudo -u {local_sudoer} ssh "$@"''')
+ # It is necessary to close the file before executing
+ ssh.close()
+ tasks = [_build_fetch_task("-S '{}'".format(ssh.name), host, src, dst, flags) for host in hosts]
+ runner = Runner(concurrency)
+ for task in tasks:
+ runner.add_task(task)
+ runner.run()
+ finally:
+ if ssh is not None:
+ os.unlink(ssh.name)
+ ssh.close()
+ basename = os.path.basename(src)
+ return {
+ host: v if v is not None else f"{dst}/{host}/{basename}"
+ for host, v in ((task.context['host'], _parse_copy_result(task)) for task in tasks)
+ }
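+
+
+# Example usage (illustrative): fetch /var/log/messages from each node into
+# <dst>/<host>/messages:
+#
+#     results = pfetch_from_remote(["node1", "node2"], "/var/log/messages", "/tmp/report")
+#     # each value is the local path on success, or a PRunError on failure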
+
+
+def _build_fetch_task( ssh: str, host: str, src: str, dst: str, flags: str) -> Task:
+ _, remote_sudoer = UserOfHost.instance().user_pair_for_ssh(host)
+ cmd = "sftp {} {} -o BatchMode=yes -s 'sudo PATH=/usr/lib/ssh:/usr/libexec/ssh /bin/sh -c \"exec sftp-server\"' -b - {}@{}".format(
+ ssh,
+ crmsh.constants.SSH_OPTION,
+ remote_sudoer, _enclose_inet6_addr(host),
+ )
+ os.makedirs(f"{dst}/{host}", exist_ok=True)
+ return Task(
+ ['/bin/sh', '-c', cmd],
+ input='get {} "{}" "{}/{}/"\n'.format(flags, src, dst, host).encode('utf-8'),
+ stdout=Task.Capture,
+ stderr=Task.Stdout,
+ context={"host": host, "ssh_user": remote_sudoer},
+ )
+
+
+def _enclose_inet6_addr(addr: str):
+ if ':' in addr:
+ return f'[{addr}]'
+ else:
+ return addr
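+
+
+# e.g. _enclose_inet6_addr("fd00::1") -> "[fd00::1]", so that user@host
+# targets parse correctly; IPv4 addresses and host names pass through unchanged.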
+
+
+def _is_local_host(host):
+ """
+ Check if the host is local
+ """
+ try:
+ socket.inet_aton(host)
+ hostname = socket.gethostbyaddr(host)[0]
+ except OSError:
+ hostname = host
+ return hostname == socket.gethostname()
diff --git a/crmsh/prun/runner.py b/crmsh/prun/runner.py
new file mode 100644
index 0000000..c90f686
--- /dev/null
+++ b/crmsh/prun/runner.py
@@ -0,0 +1,161 @@
+# runner.py - fork and exec multiple child processes concurrently
+import asyncio
+import fcntl
+import io
+import os
+import select
+import typing
+
+
+class Task:
+ """holding the inputs and outputs of a command."""
+ DevNull = 0
+ Stdout = 1
+ Capture = 2
+
+ class RedirectToFile:
+ def __init__(self, path):
+ self.path = path
+
+ def __init__(
+ self,
+ args: typing.Sequence[str],
+ input: typing.Optional[bytes] = None,
+ stdout: typing.Union[int, RedirectToFile] = DevNull,
+ stderr: typing.Union[int, RedirectToFile] = DevNull,
+ context=None,
+ ):
+ # Inputs
+ self.args = args
+ self.input = input
+ self.stdout_config = stdout
+ self.stderr_config = stderr
+ # Results
+ self.returncode: typing.Optional[int] = None
+ self.stdout: typing.Optional[bytes] = None
+ self.stderr: typing.Optional[bytes] = None
+ # Caller can pass arbitrary data to context, it is kept untouched.
+ self.context = context
+
+
+class Runner:
+ def __init__(self, concurrency):
+ self._concurrency_limiter = asyncio.Semaphore(concurrency)
+ self._tasks: typing.List[Task] = []
+
+ def add_task(self, task: Task):
+ self._tasks.append(task)
+
+ def run(self, timeout_seconds: int = -1):
+ awaitable = asyncio.gather(
+ *[
+ self._concurrency_limit(self._concurrency_limiter, self._run(task))
+ for task in self._tasks
+ ],
+ return_exceptions=True,
+ )
+ if timeout_seconds > 0:
+ awaitable = self._timeout_limit(timeout_seconds, awaitable)
+ return asyncio.get_event_loop().run_until_complete(awaitable)
+
+ async def _timeout_limit(self, timeout_seconds: int, awaitable: typing.Awaitable):
+ assert timeout_seconds > 0
+ try:
+ return await asyncio.wait_for(awaitable, timeout_seconds)
+ except asyncio.TimeoutError:
+ return self._tasks
+
+ @staticmethod
+ async def _concurrency_limit(semaphore: asyncio.Semaphore, coroutine: typing.Coroutine):
+ await semaphore.acquire()
+ try:
+ return await coroutine
+ finally:
+ semaphore.release()
+
+ @staticmethod
+ async def _run(task: Task):
+ wait_stdout_writer = None
+ if task.stdout_config == Task.DevNull:
+ stdout = asyncio.subprocess.DEVNULL
+ elif task.stdout_config == Task.Capture:
+ stdout = asyncio.subprocess.PIPE
+ elif isinstance(task.stdout_config, Task.RedirectToFile):
+ stdout, wait_stdout_writer = FileIOWriter.create(task.stdout_config.path)
+ else:
+ assert False
+
+ wait_stderr_writer = None
+ if task.stderr_config == Task.DevNull:
+ stderr = asyncio.subprocess.DEVNULL
+ elif task.stderr_config == Task.Stdout:
+ stderr = asyncio.subprocess.STDOUT
+ elif task.stderr_config == Task.Capture:
+ stderr = asyncio.subprocess.PIPE
+ elif isinstance(task.stderr_config, Task.RedirectToFile):
+ stderr, wait_stderr_writer = FileIOWriter.create(task.stderr_config.path)
+ else:
+ assert False
+
+ try:
+ try:
+ child = await asyncio.create_subprocess_exec(
+ *task.args,
+ stdin=asyncio.subprocess.PIPE if task.input else asyncio.subprocess.DEVNULL,
+ stdout=stdout,
+ stderr=stderr,
+ start_new_session=True,
+ )
+ finally:
+                # Closing the pipe inlet makes the writer thread exit once it
+                # sees EOF. Note: isinstance() against typing.BinaryIO is always
+                # False for ordinary file objects, so check io.IOBase instead.
+                if isinstance(stdout, io.IOBase):
+                    stdout.close()
+                if isinstance(stderr, io.IOBase):
+                    stderr.close()
+ if wait_stdout_writer is not None:
+ await wait_stdout_writer
+ if wait_stderr_writer is not None:
+ await wait_stderr_writer
+ task.stdout, task.stderr = await child.communicate(task.input)
+ task.returncode = child.returncode
+ except asyncio.CancelledError:
+ # Do not try to kill the child here. In a race condition, an unrelated process may be killed.
+ # Whether the task is canceled can be identified with task.returncode. No need to reraise.
+ pass
+ return task
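+
+
+# Example usage (illustrative):
+#
+#     runner = Runner(concurrency=2)
+#     task = Task(["/bin/echo", "hello"], stdout=Task.Capture)
+#     runner.add_task(task)
+#     runner.run(timeout_seconds=10)
+#     # task.returncode == 0 and task.stdout == b"hello\n"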
+
+
+class FileIOWriter:
+    # Disk I/O is blocking. To make it work with the non-blocking event loop,
+    # a thread is created to write the file. The event loop thread sends the
+    # data to be written over a pipe to that thread.
+ @staticmethod
+ def _run(path: str, pipe_outlet_fd: int):
+ fd = pipe_outlet_fd
+ try:
+ fcntl.fcntl(fd, fcntl.F_SETFL, os.O_NONBLOCK | fcntl.fcntl(fd, fcntl.F_GETFL))
+ polling = True
+ poll = select.poll()
+ poll.register(fd, select.POLLIN)
+ with open(path, 'wb') as f:
+ while polling:
+ for fd, events in poll.poll():
+ if events & select.POLLIN:
+ while True:
+ try:
+ data = os.read(fd, 4096)
+ if data:
+ f.write(data)
+ else:
+ polling = False
+ break
+ except BlockingIOError:
+ break
+ finally:
+ os.close(fd)
+
+ @staticmethod
+ def create(path: str) -> typing.Tuple[typing.BinaryIO, typing.Coroutine]:
+ """Create the pipe and the thread.
+ Returns the inlet of the pipe and a coroutine can be await for the termination of the thread."""
+ pipe_outlet, pipe_inlet = os.pipe2(os.O_CLOEXEC)
+ wait_thread = asyncio.to_thread(FileIOWriter._run, path, pipe_outlet)
+ return open(pipe_inlet, 'wb'), wait_thread
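+
+
+# Example usage inside a coroutine (illustrative):
+#
+#     inlet, waiter = FileIOWriter.create("/tmp/child.log")
+#     # hand inlet to a subprocess as its stdout, close our copy,
+#     # then `await waiter` to join the writer thread once the child exits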
diff --git a/crmsh/pyshim.py b/crmsh/pyshim.py
new file mode 100644
index 0000000..640ad18
--- /dev/null
+++ b/crmsh/pyshim.py
@@ -0,0 +1,21 @@
+import functools
+
+
+try:
+ from functools import cache
+except ImportError:
+ def cache(f):
+ cached_return_value = dict()
+
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ nonlocal cached_return_value
+ key = (tuple(args), tuple(sorted(kwargs.items())))
+ try:
+ return cached_return_value[key]
+ except KeyError:
+ ret = f(*args, **kwargs)
+ cached_return_value[key] = ret
+ return ret
+
+ return wrapper
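+
+
+# Example usage (illustrative):
+#
+#     @cache
+#     def fib(n):
+#         return n if n < 2 else fib(n - 1) + fib(n - 2)
+#
+# Like functools.cache, this fallback is unbounded and requires all
+# arguments to be hashable.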
diff --git a/crmsh/qdevice.py b/crmsh/qdevice.py
new file mode 100644
index 0000000..d603623
--- /dev/null
+++ b/crmsh/qdevice.py
@@ -0,0 +1,721 @@
+import os
+import re
+import socket
+import functools
+import subprocess
+import tempfile
+import typing
+from enum import Enum
+
+import crmsh.parallax
+from . import constants, sh
+from . import utils
+from . import parallax
+from . import corosync
+from . import xmlutil
+from . import bootstrap
+from . import lock
+from . import log
+from .service_manager import ServiceManager
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+
+
+QDEVICE_ADD = "add"
+QDEVICE_REMOVE = "remove"
+
+
+class QdevicePolicy(Enum):
+ QDEVICE_RELOAD = 0
+ QDEVICE_RESTART = 1
+ QDEVICE_RESTART_LATER = 2
+
+
+def evaluate_qdevice_quorum_effect(mode, diskless_sbd=False, is_stage=False):
+ """
+ While adding/removing qdevice, get current expected votes and actual total votes,
+ to calculate after adding/removing qdevice, whether cluster has quorum
+ return different policy
+ """
+ quorum_votes_dict = utils.get_quorum_votes_dict()
+ expected_votes = int(quorum_votes_dict["Expected"])
+ actual_votes = int(quorum_votes_dict["Total"])
+ if mode == QDEVICE_ADD:
+ expected_votes += 1
+ elif mode == QDEVICE_REMOVE:
+ actual_votes -= 1
+
+ if utils.calculate_quorate_status(expected_votes, actual_votes) and not diskless_sbd:
+ # safe to use reload
+ return QdevicePolicy.QDEVICE_RELOAD
+ elif mode == QDEVICE_ADD and not is_stage:
+ # Add qdevice from init process, safe to restart
+ return QdevicePolicy.QDEVICE_RESTART
+ elif xmlutil.CrmMonXmlParser().is_any_resource_running():
+ # will lose quorum, and with RA running
+ # no reload, no restart cluster service
+ # just leave a warning
+ return QdevicePolicy.QDEVICE_RESTART_LATER
+ else:
+ # will lose quorum, without RA running
+ # safe to restart cluster service
+ return QdevicePolicy.QDEVICE_RESTART
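+
+
+# Worked example (illustrative): on a two-node cluster reporting Expected=2
+# and Total=2, adding qdevice gives expected_votes=3 against actual_votes=2.
+# That is still quorate, so without diskless SBD a corosync reload is enough
+# (QDEVICE_RELOAD); otherwise a restart is required.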
+
+
+def qnetd_lock_for_same_cluster_name(func):
+ """
+    Decorator to claim a lock on qnetd, to avoid adding the same cluster name to qnetd more than once
+ """
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ cluster_name = args[0].cluster_name
+ lock_dir = "/run/.crmsh_qdevice_lock_for_{}".format(cluster_name)
+ lock_inst = lock.RemoteLock(args[0].qnetd_addr, for_join=False, lock_dir=lock_dir, wait=False)
+ try:
+ with lock_inst.lock():
+ func(*args, **kwargs)
+ except lock.ClaimLockError:
+ utils.fatal("Duplicated cluster name \"{}\"!".format(cluster_name))
+ except lock.SSHError as err:
+ utils.fatal(err)
+ return wrapper
+
+
+def qnetd_lock_for_multi_cluster(func):
+ """
+    Decorator to claim a lock on qnetd, to avoid a possible race condition
+ """
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ lock_inst = lock.RemoteLock(args[0].qnetd_addr, for_join=False, no_warn=True)
+ try:
+ with lock_inst.lock():
+ func(*args, **kwargs)
+ except (lock.SSHError, lock.ClaimLockError) as err:
+ utils.fatal(err)
+ return wrapper
+
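+# Example usage (illustrative; the qnetd host name is an assumption):
+#
+#     qdevice = QDevice("qnetd-node", port=5403, algo="ffsplit",
+#                       cluster_name="hacluster")
+#     qdevice.valid_qdevice_options()
+#     qdevice.config_and_start_qdevice()
+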
+
+class QDevice(object):
+ """
+ Class to manage qdevice configuration and services
+
+ Whole certification process:
+ For init
+ Step 1: init_db_on_qnetd
+ Step 2: fetch_qnetd_crt_from_qnetd
+ Step 3: copy_qnetd_crt_to_cluster
+ Step 4: init_db_on_cluster
+ Step 5: create_ca_request
+ Step 6: copy_crq_to_qnetd
+ Step 7: sign_crq_on_qnetd
+ Step 8: fetch_cluster_crt_from_qnetd
+ Step 9: import_cluster_crt
+ Step 10: copy_p12_to_cluster
+ Step 11: import_p12_on_cluster
+
+ For join
+ Step 1: fetch_qnetd_crt_from_cluster
+ Step 2: init_db_on_local
+ Step 3: fetch_p12_from_cluster
+ Step 4: import_p12_on_local
+ """
+ qnetd_service = "corosync-qnetd.service"
+ qnetd_cacert_filename = "qnetd-cacert.crt"
+ qdevice_crq_filename = "qdevice-net-node.crq"
+ qdevice_p12_filename = "qdevice-net-node.p12"
+ qnetd_path = "/etc/corosync/qnetd"
+ qdevice_path = "/etc/corosync/qdevice/net"
+ qdevice_db_path = "/etc/corosync/qdevice/net/nssdb"
+
+ def __init__(self, qnetd_addr, port=5403, algo="ffsplit", tie_breaker="lowest",
+ tls="on", ssh_user=None, cluster_node=None, cmds=None, mode=None, cluster_name=None, is_stage=False):
+ """
+ Init function
+ """
+ self.qnetd_addr = qnetd_addr
+ self.port = port
+ self.algo = algo
+ self.tie_breaker = tie_breaker
+ self.tls = tls
+ self.ssh_user = ssh_user
+ self.cluster_node = cluster_node
+ self.cmds = cmds
+ self.mode = mode
+ self.cluster_name = cluster_name
+ self.qdevice_reload_policy = QdevicePolicy.QDEVICE_RESTART
+ self.is_stage = is_stage
+ self.using_diskless_sbd = False
+
+ @property
+ def qnetd_cacert_on_qnetd(self):
+ """
+ Return path of qnetd-cacert.crt on qnetd node
+ """
+ return "{}/nssdb/{}".format(self.qnetd_path, self.qnetd_cacert_filename)
+
+ @property
+ def qnetd_cacert_on_local(self):
+ """
+ Return path of qnetd-cacert.crt on local node
+ """
+ return "{}/{}/{}".format(self.qdevice_path, self.qnetd_addr, self.qnetd_cacert_filename)
+
+ @property
+ def qnetd_cacert_on_cluster(self):
+ """
+ Return path of qnetd-cacert.crt on cluster node
+ """
+ return "{}/{}/{}".format(self.qdevice_path, self.cluster_node, self.qnetd_cacert_filename)
+
+ @property
+ def qdevice_crq_on_qnetd(self):
+ """
+ Return path of qdevice-net-node.crq on qnetd node
+ """
+ return "{}/nssdb/{}.{}".format(self.qnetd_path, self.qdevice_crq_filename, self.cluster_name)
+
+ @property
+ def qdevice_crq_on_local(self):
+ """
+ Return path of qdevice-net-node.crq on local node
+ """
+ return "{}/nssdb/{}".format(self.qdevice_path, self.qdevice_crq_filename)
+
+ @property
+ def qnetd_cluster_crt_on_qnetd(self):
+ """
+ Return path of cluster-cluster_name.crt on qnetd node
+ """
+ return "{}/nssdb/cluster-{}.crt".format(self.qnetd_path, self.cluster_name)
+
+ @property
+ def qnetd_cluster_crt_on_local(self):
+ """
+ Return path of cluster-cluster_name.crt on local node
+ """
+ return "{}/{}/{}".format(self.qdevice_path, self.qnetd_addr, os.path.basename(self.qnetd_cluster_crt_on_qnetd))
+
+ @property
+ def qdevice_p12_on_local(self):
+ """
+ Return path of qdevice-net-node.p12 on local node
+ """
+ return "{}/nssdb/{}".format(self.qdevice_path, self.qdevice_p12_filename)
+
+ @property
+ def qdevice_p12_on_cluster(self):
+ """
+ Return path of qdevice-net-node.p12 on cluster node
+ """
+ return "{}/{}/{}".format(self.qdevice_path, self.cluster_node, self.qdevice_p12_filename)
+
+ @staticmethod
+ def check_qnetd_addr(qnetd_addr):
+ qnetd_ip = None
+ try:
+ # socket.getaddrinfo works for both ipv4 and ipv6 address
+ # The function returns a list of 5-tuples with the following structure:
+ # (family, type, proto, canonname, sockaddr)
+ # sockaddr is a tuple describing a socket address, whose format depends on the returned family
+ # (a (address, port) 2-tuple for AF_INET, a (address, port, flow info, scope id) 4-tuple for AF_INET6)
+ res = socket.getaddrinfo(qnetd_addr, None)
+ qnetd_ip = res[0][-1][0]
+ except socket.error:
+ raise ValueError("host \"{}\" is unreachable".format(qnetd_addr))
+
+ utils.ping_node(qnetd_addr)
+
+ if utils.InterfacesInfo.ip_in_local(qnetd_ip):
+ raise ValueError("host for qnetd must be a remote one")
+
+ if not utils.check_port_open(qnetd_ip, 22):
+ raise ValueError("ssh service on \"{}\" not available".format(qnetd_addr))
+
+ @staticmethod
+ def check_qdevice_port(qdevice_port):
+ if not utils.valid_port(qdevice_port):
+ raise ValueError("invalid qdevice port range(1024 - 65535)")
+
+ @staticmethod
+ def check_qdevice_algo(qdevice_algo):
+ if qdevice_algo not in ("ffsplit", "lms"):
+ raise ValueError("invalid ALGORITHM choice: '{}' (choose from 'ffsplit', 'lms')".format(qdevice_algo))
+
+ @staticmethod
+ def check_qdevice_tie_breaker(qdevice_tie_breaker):
+ if qdevice_tie_breaker not in ("lowest", "highest") and not utils.valid_nodeid(qdevice_tie_breaker):
+ raise ValueError("invalid qdevice tie_breaker(lowest/highest/valid_node_id)")
+
+ @staticmethod
+ def check_qdevice_tls(qdevice_tls):
+ if qdevice_tls not in ("on", "off", "required"):
+ raise ValueError("invalid TLS choice: '{}' (choose from 'on', 'off', 'required')".format(qdevice_tls))
+
+ @staticmethod
+ def check_qdevice_heuristics_mode(mode):
+ if not mode:
+ return
+ if mode not in ("on", "sync", "off"):
+ raise ValueError("invalid MODE choice: '{}' (choose from 'on', 'sync', 'off')".format(mode))
+
+ @staticmethod
+ def check_qdevice_heuristics(cmds):
+ if not cmds:
+ return
+ for cmd in cmds.strip(';').split(';'):
+ if not cmd.startswith('/'):
+ raise ValueError("commands for heuristics should be absolute path")
+ if not os.path.exists(cmd.split()[0]):
+ raise ValueError("command {} not exist".format(cmd.split()[0]))
+
+ @staticmethod
+ def check_package_installed(pkg, remote=None):
+ if not utils.package_is_installed(pkg, remote_addr=remote):
+ raise ValueError("Package \"{}\" not installed on {}".format(pkg, remote if remote else "this node"))
+
+ def valid_qdevice_options(self):
+ """
+ Validate qdevice related options
+ """
+ self.check_package_installed("corosync-qdevice")
+ self.check_qnetd_addr(self.qnetd_addr)
+ self.check_qdevice_port(self.port)
+ self.check_qdevice_algo(self.algo)
+ self.check_qdevice_tie_breaker(self.tie_breaker)
+ self.check_qdevice_tls(self.tls)
+ self.check_qdevice_heuristics(self.cmds)
+ self.check_qdevice_heuristics_mode(self.mode)
+
+ def valid_qnetd(self):
+ """
+ Validate on qnetd node
+ """
+ exception_msg = ""
+ suggest = ""
+ duplicated_cluster_name = False
+ shell = sh.cluster_shell()
+ if not utils.package_is_installed("corosync-qnetd", remote_addr=self.qnetd_addr):
+ exception_msg = "Package \"corosync-qnetd\" not installed on {}!".format(self.qnetd_addr)
+ suggest = "install \"corosync-qnetd\" on {}".format(self.qnetd_addr)
+ elif ServiceManager().service_is_active("corosync-qnetd", remote_addr=self.qnetd_addr):
+ cmd = "corosync-qnetd-tool -l -c {}".format(self.cluster_name)
+ if shell.get_stdout_or_raise_error(cmd, self.qnetd_addr):
+ duplicated_cluster_name = True
+ else:
+ cmd = "test -f {}".format(self.qnetd_cluster_crt_on_qnetd)
+ try:
+ shell.get_stdout_or_raise_error(cmd, self.qnetd_addr)
+ except ValueError:
+ # target file not exist
+ pass
+ else:
+ duplicated_cluster_name = True
+ if duplicated_cluster_name:
+ exception_msg = "This cluster's name \"{}\" already exists on qnetd server!".format(self.cluster_name)
+ suggest = "consider to use the different cluster-name property"
+
+ if exception_msg:
+ if self.is_stage:
+ exception_msg += "\nPlease {}.".format(suggest)
+ else:
+ exception_msg += "\nCluster service already successfully started on this node except qdevice service.\nIf you still want to use qdevice, {}.\nThen run command \"crm cluster init\" with \"qdevice\" stage, like:\n crm cluster init qdevice qdevice_related_options\nThat command will setup qdevice separately.".format(suggest)
+ raise ValueError(exception_msg)
+
+ def enable_qnetd(self):
+ ServiceManager().enable_service(self.qnetd_service, remote_addr=self.qnetd_addr)
+
+ def disable_qnetd(self):
+ ServiceManager().disable_service(self.qnetd_service, remote_addr=self.qnetd_addr)
+
+ def start_qnetd(self):
+ ServiceManager().start_service(self.qnetd_service, remote_addr=self.qnetd_addr)
+
+ def stop_qnetd(self):
+ ServiceManager().stop_service(self.qnetd_service, remote_addr=self.qnetd_addr)
+
+ def set_cluster_name(self):
+ if not self.cluster_name:
+ self.cluster_name = corosync.get_value('totem.cluster_name')
+ if not self.cluster_name:
+ raise ValueError("No cluster_name found in {}".format(corosync.conf()))
+
+ @qnetd_lock_for_multi_cluster
+ def init_db_on_qnetd(self):
+ """
+ Certificate process for init
+ Step 1
+ Initialize database on QNetd server by running corosync-qnetd-certutil -i
+ """
+ cmd = "test -f {}".format(self.qnetd_cacert_on_qnetd)
+ try:
+ parallax.parallax_call([self.qnetd_addr], cmd)
+ except ValueError:
+ # target file not exist
+ pass
+ else:
+ return
+
+ cmd = "corosync-qnetd-certutil -i"
+ desc = "Step 1: Initialize database on {}".format(self.qnetd_addr)
+ QDevice.log_only_to_file(desc, cmd)
+ parallax.parallax_call([self.qnetd_addr], cmd)
+
+ def fetch_qnetd_crt_from_qnetd(self):
+ """
+ Certificate process for init
+ Step 2
+ Fetch QNetd CA certificate(qnetd-cacert.crt) from QNetd server
+ """
+ if os.path.exists(self.qnetd_cacert_on_local):
+ return
+
+ desc = "Step 2: Fetch {} from {}".format(self.qnetd_cacert_filename, self.qnetd_addr)
+ QDevice.log_only_to_file(desc)
+ crmsh.parallax.parallax_slurp([self.qnetd_addr], self.qdevice_path, self.qnetd_cacert_on_qnetd)
+
+ def copy_qnetd_crt_to_cluster(self):
+ """
+ Certificate process for init
+ Step 3
+ Copy exported QNetd CA certificate (qnetd-cacert.crt) to every node
+ """
+ node_list = utils.list_cluster_nodes_except_me()
+ if not node_list:
+ return
+
+ desc = "Step 3: Copy exported {} to {}".format(self.qnetd_cacert_filename, node_list)
+ QDevice.log_only_to_file(desc)
+ self._copy_file_to_remote_hosts(
+ os.path.dirname(self.qnetd_cacert_on_local),
+ node_list, self.qdevice_path,
+ recursive=True,
+ )
+
+ @staticmethod
+ def _enclose_inet6_addr(addr: str):
+ if ':' in addr:
+ return f'[{addr}]'
+ else:
+ return addr
+
+ @classmethod
+ def _copy_file_to_remote_hosts(cls, local_file, remote_hosts: typing.Iterable[str], remote_path, recursive=False):
+ crmsh.parallax.parallax_copy(remote_hosts, local_file, remote_path, recursive)
+
+ def init_db_on_cluster(self):
+ """
+ Certificate process for init
+ Step 4
+        On one of the cluster nodes, initialize the database by running
+ /usr/sbin/corosync-qdevice-net-certutil -i -c qnetd-cacert.crt
+ """
+ node_list = utils.list_cluster_nodes()
+ cmd = "corosync-qdevice-net-certutil -i -c {}".format(self.qnetd_cacert_on_local)
+ desc = "Step 4: Initialize database on {}".format(node_list)
+ QDevice.log_only_to_file(desc, cmd)
+ crmsh.parallax.parallax_call(node_list, cmd)
+
+ def create_ca_request(self):
+ """
+ Certificate process for init
+ Step 5
+ Generate certificate request:
+ /usr/sbin/corosync-qdevice-net-certutil -r -n Cluster
+ (Cluster name must match cluster_name key in the corosync.conf)
+ """
+ cmd = "corosync-qdevice-net-certutil -r -n {}".format(self.cluster_name)
+ QDevice.log_only_to_file("Step 5: Generate certificate request {}".format(self.qdevice_crq_filename), cmd)
+ sh.cluster_shell().get_stdout_or_raise_error(cmd)
+
+ def copy_crq_to_qnetd(self):
+ """
+ Certificate process for init
+ Step 6
+ Copy exported CRQ to QNetd server
+ """
+ desc = "Step 6: Copy {} to {}".format(self.qdevice_crq_filename, self.qnetd_addr)
+ QDevice.log_only_to_file(desc)
+ self._copy_file_to_remote_hosts(self.qdevice_crq_on_local, [self.qnetd_addr], self.qdevice_crq_on_qnetd)
+
+ def sign_crq_on_qnetd(self):
+ """
+ Certificate process for init
+ Step 7
+ On QNetd server sign and export cluster certificate by running
+ corosync-qnetd-certutil -s -c qdevice-net-node.crq -n Cluster
+ """
+ desc = "Step 7: Sign and export cluster certificate on {}".format(self.qnetd_addr)
+ cmd = "corosync-qnetd-certutil -s -c {} -n {}".\
+ format(self.qdevice_crq_on_qnetd, self.cluster_name)
+ QDevice.log_only_to_file(desc, cmd)
+ parallax.parallax_call([self.qnetd_addr], cmd)
+
+ def fetch_cluster_crt_from_qnetd(self):
+ """
+ Certificate process for init
+ Step 8
+ Copy exported CRT to node where certificate request was created
+ """
+ desc = "Step 8: Fetch {} from {}".format(os.path.basename(self.qnetd_cluster_crt_on_qnetd), self.qnetd_addr)
+ QDevice.log_only_to_file(desc)
+ crmsh.parallax.parallax_slurp([self.qnetd_addr], self.qdevice_path, self.qnetd_cluster_crt_on_qnetd)
+
+ def import_cluster_crt(self):
+ """
+ Certificate process for init
+ Step 9
+ Import certificate on node where certificate request was created by
+ running /usr/sbin/corosync-qdevice-net-certutil -M -c cluster-Cluster.crt
+ """
+ cmd = "corosync-qdevice-net-certutil -M -c {}".format(self.qnetd_cluster_crt_on_local)
+ QDevice.log_only_to_file(
+ "Step 9: Import certificate file {} on local".format(os.path.basename(self.qnetd_cluster_crt_on_local)),
+ cmd)
+ sh.cluster_shell().get_stdout_or_raise_error(cmd)
+
+ def copy_p12_to_cluster(self):
+ """
+ Certificate process for init
+ Step 10
+ Copy output qdevice-net-node.p12 to all other cluster nodes
+ """
+ node_list = utils.list_cluster_nodes_except_me()
+ if not node_list:
+ return
+
+ desc = "Step 10: Copy {} to {}".format(self.qdevice_p12_filename, node_list)
+ QDevice.log_only_to_file(desc)
+ self._copy_file_to_remote_hosts(self.qdevice_p12_on_local, node_list, self.qdevice_p12_on_local)
+
+ def import_p12_on_cluster(self):
+ """
+ Certificate process for init
+ Step 11
+ Import cluster certificate and key on all other cluster nodes:
+ /usr/sbin/corosync-qdevice-net-certutil -m -c qdevice-net-node.p12
+ """
+ node_list = utils.list_cluster_nodes_except_me()
+ if not node_list:
+ return
+
+ desc = "Step 11: Import {} on {}".format(self.qdevice_p12_filename, node_list)
+ cmd = "corosync-qdevice-net-certutil -m -c {}".format(self.qdevice_p12_on_local)
+ QDevice.log_only_to_file(desc, cmd)
+ parallax.parallax_call(node_list, cmd)
+
+ def certificate_process_on_init(self):
+ """
+ The qdevice certificate process on init node
+ """
+ self.init_db_on_qnetd()
+ self.fetch_qnetd_crt_from_qnetd()
+ self.copy_qnetd_crt_to_cluster()
+ self.init_db_on_cluster()
+ self.create_ca_request()
+ self.copy_crq_to_qnetd()
+ self.sign_crq_on_qnetd()
+ self.fetch_cluster_crt_from_qnetd()
+ self.import_cluster_crt()
+ self.copy_p12_to_cluster()
+ self.import_p12_on_cluster()
+
+ def fetch_qnetd_crt_from_cluster(self):
+ """
+ Certificate process for join
+ Step 1
+ Fetch QNetd CA certificate(qnetd-cacert.crt) from init node
+ """
+ if os.path.exists(self.qnetd_cacert_on_cluster):
+ return
+
+ desc = "Step 1: Fetch {} from {}".format(self.qnetd_cacert_filename, self.cluster_node)
+ QDevice.log_only_to_file(desc)
+ crmsh.parallax.parallax_slurp([self.cluster_node], self.qdevice_path, self.qnetd_cacert_on_local)
+
+ def init_db_on_local(self):
+ """
+ Certificate process for join
+ Step 2
+ Initialize database by running
+ /usr/sbin/corosync-qdevice-net-certutil -i -c qnetd-cacert.crt
+ """
+ if os.path.exists(self.qdevice_db_path):
+ utils.rmdir_r(self.qdevice_db_path)
+
+ cmd = "corosync-qdevice-net-certutil -i -c {}".format(self.qnetd_cacert_on_cluster)
+ QDevice.log_only_to_file("Step 2: Initialize database on local", cmd)
+ sh.cluster_shell().get_stdout_or_raise_error(cmd)
+
+ def fetch_p12_from_cluster(self):
+ """
+ Certificate process for join
+ Step 3
+ Fetch p12 key file from init node
+ """
+ if os.path.exists(self.qdevice_p12_on_cluster):
+ return
+
+ desc = "Step 3: Fetch {} from {}".format(self.qdevice_p12_filename, self.cluster_node)
+ QDevice.log_only_to_file(desc)
+ crmsh.parallax.parallax_slurp([self.cluster_node], self.qdevice_path, self.qdevice_p12_on_local)
+
+ def import_p12_on_local(self):
+ """
+ Certificate process for join
+ Step 4
+ Import cluster certificate and key
+ """
+ cmd = "corosync-qdevice-net-certutil -m -c {}".format(self.qdevice_p12_on_cluster)
+ QDevice.log_only_to_file("Step 4: Import cluster certificate and key", cmd)
+ sh.cluster_shell().get_stdout_or_raise_error(cmd)
+
+ def certificate_process_on_join(self):
+ """
+ The qdevice certificate process on join node
+ """
+ self.fetch_qnetd_crt_from_cluster()
+ self.init_db_on_local()
+ self.fetch_p12_from_cluster()
+ self.import_p12_on_local()
+
+ def write_qdevice_config(self):
+ """
+ Write qdevice attributes to config file
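+
+        The resulting section in corosync.conf looks roughly like
+        (illustrative):
+
+            quorum {
+                device {
+                    votes: 1
+                    model: net
+                    net {
+                        tls: on
+                        host: <qnetd_addr>
+                        port: <port>
+                        algorithm: <algo>
+                        tie_breaker: <tie_breaker>
+                    }
+                }
+            }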
+ """
+ p = corosync.Parser(utils.read_from_file(corosync.conf()))
+ p.remove("quorum.device")
+ p.add('quorum', corosync.make_section('quorum.device', []))
+ p.set('quorum.device.votes', '1')
+ p.set('quorum.device.model', 'net')
+ p.add('quorum.device', corosync.make_section('quorum.device.net', []))
+ p.set('quorum.device.net.tls', self.tls)
+ p.set('quorum.device.net.host', self.qnetd_addr)
+ p.set('quorum.device.net.port', self.port)
+ p.set('quorum.device.net.algorithm', self.algo)
+ p.set('quorum.device.net.tie_breaker', self.tie_breaker)
+ if self.cmds:
+ p.add('quorum.device', corosync.make_section('quorum.device.heuristics', []))
+ p.set('quorum.device.heuristics.mode', self.mode)
+ for i, cmd in enumerate(self.cmds.strip(';').split(';')):
+ cmd_name = re.sub("[.-]", "_", os.path.basename(cmd.split()[0]))
+ exec_name = "exec_{}{}".format(cmd_name, i)
+ p.set('quorum.device.heuristics.{}'.format(exec_name), cmd)
+ utils.str2file(p.to_string(), corosync.conf())
+
+ @staticmethod
+ def remove_qdevice_config():
+ """
+ Remove configuration of qdevice
+ """
+ p = corosync.Parser(utils.read_from_file(corosync.conf()))
+ p.remove("quorum.device")
+ utils.str2file(p.to_string(), corosync.conf())
+
+ @staticmethod
+    def remove_qdevice_db(addr_list=None):
+ """
+ Remove qdevice database
+ """
+ if not os.path.exists(QDevice.qdevice_db_path):
+ return
+ node_list = addr_list if addr_list else utils.list_cluster_nodes()
+ cmd = "rm -rf {}/*".format(QDevice.qdevice_path)
+ QDevice.log_only_to_file("Remove qdevice database", cmd)
+ parallax.parallax_call(node_list, cmd)
+
+ @classmethod
+ def remove_certification_files_on_qnetd(cls):
+ """
+ Remove this cluster related .crq and .crt files on qnetd
+ """
+ if not utils.is_qdevice_configured():
+ return
+ qnetd_host = corosync.get_value('quorum.device.net.host')
+ cluster_name = corosync.get_value('totem.cluster_name')
+ cls_inst = cls(qnetd_host, cluster_name=cluster_name)
+ shell = sh.cluster_shell()
+ cmd = "test -f {crt_file} && rm -f {crt_file}".format(crt_file=cls_inst.qnetd_cluster_crt_on_qnetd)
+ shell.get_stdout_or_raise_error(cmd, qnetd_host)
+ cmd = "test -f {crq_file} && rm -f {crq_file}".format(crq_file=cls_inst.qdevice_crq_on_qnetd)
+ shell.get_stdout_or_raise_error(cmd, qnetd_host)
+
+ def config_qdevice(self):
+ """
+ Update configuration and reload corosync if necessary
+ """
+ self.write_qdevice_config()
+ if not corosync.is_unicast():
+ corosync.add_nodelist_from_cmaptool()
+ with logger_utils.status_long("Update configuration"):
+ bootstrap.update_expected_votes()
+ if self.qdevice_reload_policy == QdevicePolicy.QDEVICE_RELOAD:
+ utils.cluster_run_cmd("crm corosync reload")
+
+ def start_qdevice_service(self):
+ """
+ Start qdevice and qnetd service
+ """
+ logger.info("Enable corosync-qdevice.service in cluster")
+ utils.cluster_run_cmd("systemctl enable corosync-qdevice")
+ if self.qdevice_reload_policy == QdevicePolicy.QDEVICE_RELOAD:
+ logger.info("Starting corosync-qdevice.service in cluster")
+ utils.cluster_run_cmd("systemctl restart corosync-qdevice")
+ elif self.qdevice_reload_policy == QdevicePolicy.QDEVICE_RESTART:
+ logger.info("Restarting cluster service")
+ utils.cluster_run_cmd("crm cluster restart")
+ bootstrap.wait_for_cluster()
+ else:
+ logger.warning("To use qdevice service, need to restart cluster service manually on each node")
+
+ logger.info("Enable corosync-qnetd.service on {}".format(self.qnetd_addr))
+ self.enable_qnetd()
+ logger.info("Starting corosync-qnetd.service on {}".format(self.qnetd_addr))
+ self.start_qnetd()
+
+ def adjust_sbd_watchdog_timeout_with_qdevice(self):
+ """
+ Adjust SBD_WATCHDOG_TIMEOUT when configuring qdevice and diskless SBD
+ """
+ from .sbd import SBDManager, SBDTimeout
+ utils.check_all_nodes_reachable()
+ self.using_diskless_sbd = SBDManager.is_using_diskless_sbd()
+ # add qdevice after diskless sbd started
+ if self.using_diskless_sbd:
+ res = SBDManager.get_sbd_value_from_config("SBD_WATCHDOG_TIMEOUT")
+ if not res or int(res) < SBDTimeout.SBD_WATCHDOG_TIMEOUT_DEFAULT_WITH_QDEVICE:
+ sbd_watchdog_timeout_qdevice = SBDTimeout.SBD_WATCHDOG_TIMEOUT_DEFAULT_WITH_QDEVICE
+ SBDManager.update_configuration({"SBD_WATCHDOG_TIMEOUT": str(sbd_watchdog_timeout_qdevice)})
+ utils.set_property("stonith-timeout", SBDTimeout.get_stonith_timeout())
+
+ @qnetd_lock_for_same_cluster_name
+ def config_and_start_qdevice(self):
+ """
+ Wrap function to collect functions to config and start qdevice
+ """
+ QDevice.remove_qdevice_db()
+ if self.tls == "on":
+ with logger_utils.status_long("Qdevice certification process"):
+ self.certificate_process_on_init()
+ self.adjust_sbd_watchdog_timeout_with_qdevice()
+ self.qdevice_reload_policy = evaluate_qdevice_quorum_effect(QDEVICE_ADD, self.using_diskless_sbd, self.is_stage)
+ self.config_qdevice()
+ self.start_qdevice_service()
+
+ @staticmethod
+ def check_qdevice_vote():
+ """
+ Check if qdevice can contribute vote
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("corosync-quorumtool -s", success_exit_status={0, 2})
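+        # the pattern matches a quorumtool membership line whose votes and
+        # qdevice votes are both 0, e.g. (illustrative):
+        #     "         0          0 Qdevice"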
+ res = re.search(r'\s+0\s+0\s+Qdevice', out)
+ if res:
+ qnetd_host = corosync.get_value('quorum.device.net.host')
+ logger.warning("Qdevice's vote is 0, which simply means Qdevice can't talk to Qnetd({}) for various reasons.".format(qnetd_host))
+
+ @staticmethod
+ def log_only_to_file(desc, cmd=None):
+ logger_utils.log_only_to_file(desc)
+ if cmd:
+ logger_utils.log_only_to_file(f"Run: {cmd}")
diff --git a/crmsh/ra.py b/crmsh/ra.py
new file mode 100644
index 0000000..518070c
--- /dev/null
+++ b/crmsh/ra.py
@@ -0,0 +1,977 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+import os
+import subprocess
+import copy
+import re
+import glob
+from lxml import etree
+from . import cache
+from . import constants
+from . import config
+from . import options
+from . import userdir
+from . import utils
+from .sh import ShellUtils
+from .utils import stdout2list, is_program, is_process, to_ascii
+from .utils import os_types_list
+from .utils import crm_msec, crm_time_cmp
+from . import log
+
+
+logger = log.setup_logger(__name__)
+
+
+#
+# Resource Agents interface (meta-data, parameters, etc)
+#
+
+lrmadmin_prog = "lrmadmin"
+
+
+def lrmadmin(opts, xml=False):
+ """
+ Get information directly from lrmd using lrmadmin.
+ """
+ _rc, l = stdout2list("%s %s" % (lrmadmin_prog, opts))
+ if l and not xml:
+ l = l[1:] # skip the first line
+ return l
+
+
+def crm_resource(opts):
+ '''
+ Get information from crm_resource.
+ '''
+ _rc, l = stdout2list("crm_resource %s" % opts, stderr_on=False)
+ return l
+
+
+@utils.memoize
+def can_use_lrmadmin():
+ from distutils import version
+ # after this glue release all users can get meta-data and
+ # similar from lrmd
+ minimum_glue = "1.0.10"
+ _rc, glue_ver = ShellUtils().get_stdout("%s -v" % lrmadmin_prog, stderr_on=False)
+ if not glue_ver: # lrmadmin probably not found
+ return False
+ v_min = version.LooseVersion(minimum_glue)
+ v_this = version.LooseVersion(glue_ver)
+ if v_this < v_min:
+ return False
+ if userdir.getuser() not in ("root", config.path.crm_daemon_user):
+ return False
+    if not (is_program(lrmadmin_prog) and is_process(utils.pacemaker_execd())):
+ return False
+ return utils.ext_cmd(">/dev/null 2>&1 %s -C" % lrmadmin_prog) == 0
+
+
+@utils.memoize
+def can_use_crm_resource():
+ _rc, s = ShellUtils().get_stdout("crm_resource --list-ocf-providers", stderr_on=False)
+ return s != ""
+
+
+def ra_classes():
+ '''
+ List of RA classes.
+ '''
+ if cache.is_cached("ra_classes"):
+ return cache.retrieve("ra_classes")
+ if can_use_crm_resource():
+ l = crm_resource("--list-standards")
+ elif can_use_lrmadmin():
+ l = lrmadmin("-C")
+ else:
+ l = ["heartbeat", "lsb", "nagios", "ocf", "stonith", "systemd"]
+ l.sort()
+ return cache.store("ra_classes", l)
+
+
+def ra_providers(ra_type, ra_class="ocf"):
+ 'List of providers for a class:type.'
+ ident = "ra_providers-%s-%s" % (ra_class, ra_type)
+ if cache.is_cached(ident):
+ return cache.retrieve(ident)
+ if can_use_crm_resource():
+ if ra_class != "ocf":
+ logger.error("no providers for class %s", ra_class)
+ return []
+ l = crm_resource("--list-ocf-alternatives %s" % ra_type)
+ elif can_use_lrmadmin():
+ l = lrmadmin("-P %s %s" % (ra_class, ra_type), True)
+ else:
+ l = []
+ if ra_class == "ocf":
+ for s in glob.glob("%s/resource.d/*/%s" % (os.environ["OCF_ROOT"], ra_type)):
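+                # e.g. /usr/lib/ocf/resource.d/heartbeat/IPaddr2 splits into
+                # ['', 'usr', 'lib', 'ocf', 'resource.d', 'heartbeat', 'IPaddr2'],
+                # so the provider name sits at index 5 (assuming OCF_ROOT is
+                # /usr/lib/ocf)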
+ a = s.split("/")
+ if len(a) == 7:
+ l.append(a[5])
+ l.sort()
+ return cache.store(ident, l)
+
+
+def ra_providers_all(ra_class="ocf"):
+ '''
+ List of providers for a class.
+ '''
+ if ra_class != "ocf":
+ return []
+ ident = "ra_providers_all-%s" % ra_class
+ if cache.is_cached(ident):
+ return cache.retrieve(ident)
+ ocf = os.path.join(os.environ["OCF_ROOT"], "resource.d")
+ if os.path.isdir(ocf):
+ return cache.store(ident, sorted(s for s in os.listdir(ocf)
+ if os.path.isdir(os.path.join(ocf, s))))
+ return []
+
+
+def os_types(ra_class):
+ 'List of types for a class.'
+ def stonith_types():
+ rc, l = stdout2list("stonith -L")
+ if rc != 0:
+ # stonith(8) may not be installed
+ logger.debug("stonith exited with code %d", rc)
+ l = []
+ for ra in os_types_list("/usr/sbin/fence_*"):
+ if ra not in ("fence_ack_manual", "fence_pcmk", "fence_legacy"):
+ l.append(ra)
+ return l
+
+ def systemd_types():
+ l = []
+ rc, lines = stdout2list("systemctl list-unit-files --full")
+ if rc != 0:
+ return l
+ t = re.compile(r'^(.+)\.service')
+ for line in lines:
+ m = t.search(line)
+ if m:
+ l.append(m.group(1))
+ return l
+
+ l = []
+ if ra_class == "ocf":
+ l = os_types_list("%s/resource.d/*/*" % (os.environ["OCF_ROOT"]))
+ elif ra_class == "lsb":
+ l = os_types_list("/etc/init.d/*")
+ elif ra_class == "stonith":
+ l = stonith_types()
+ elif ra_class == "nagios":
+ l = [x.replace("check_", "")
+ for x in os_types_list("%s/check_*" % config.path.nagios_plugins)]
+ elif ra_class == "systemd":
+ l = systemd_types()
+ l = list(set(l))
+ l.sort()
+ return l
+
+
+def ra_types(ra_class="ocf", ra_provider=""):
+ '''
+ List of RA type for a class.
+ '''
+
+ def find_types():
+ """
+ Actually go out and ask for the types of a class.
+ """
+ if can_use_crm_resource():
+ l = crm_resource("--list-agents %s" % ra_class)
+ elif can_use_lrmadmin():
+ l = lrmadmin("-T %s" % ra_class)
+ else:
+ l = os_types(ra_class)
+ return l
+
+ if not ra_class:
+ ra_class = "ocf"
+ ident = "ra_types-%s-%s" % (ra_class, ra_provider)
+ if cache.is_cached(ident):
+ return cache.retrieve(ident)
+
+ if not ra_provider:
+ def include(ra):
+ return True
+ else:
+ def include(ra):
+ return ra_provider in ra_providers(ra, ra_class)
+ return cache.store(ident, sorted(list(set(ra for ra in find_types() if include(ra)))))
+
+
+@utils.memoize
+def ra_meta(ra_class, ra_type, ra_provider):
+ """
+ Return metadata for the given class/type/provider
+ """
+ if can_use_crm_resource():
+ if ra_provider:
+ return crm_resource("--show-metadata %s:%s:%s" % (ra_class, ra_provider, ra_type))
+ return crm_resource("--show-metadata %s:%s" % (ra_class, ra_type))
+ elif can_use_lrmadmin():
+ return lrmadmin("-M %s %s %s" % (ra_class, ra_type, ra_provider), True)
+ else:
+ l = []
+ if ra_class == "ocf":
+ _rc, l = stdout2list("%s/resource.d/%s/%s meta-data" %
+ (os.environ["OCF_ROOT"], ra_provider, ra_type))
+ elif ra_class == "stonith":
+ if ra_type.startswith("fence_") and os.path.exists("/usr/sbin/%s" % ra_type):
+ _rc, l = stdout2list("/usr/sbin/%s -o metadata" % ra_type)
+ else:
+ _rc, l = stdout2list("stonith -m -t %s" % ra_type)
+ elif ra_class == "nagios":
+ _rc, l = stdout2list("%s/check_%s --metadata" %
+ (config.path.nagios_plugins, ra_type))
+ return l
+
+
+@utils.memoize
+def get_pe_meta():
+ return RAInfo(utils.pacemaker_schedulerd(), "metadata")
+
+
+@utils.memoize
+def get_crmd_meta():
+ return RAInfo(utils.pacemaker_controld(), "metadata",
+ exclude_from_completion=constants.crmd_metadata_do_not_complete)
+
+
+@utils.memoize
+def get_stonithd_meta():
+ return RAInfo(utils.pacemaker_fenced(), "metadata")
+
+
+@utils.memoize
+def get_cib_meta():
+ return RAInfo(utils.pacemaker_based(), "metadata")
+
+
+@utils.memoize
+def get_properties_meta():
+ meta = copy.deepcopy(get_crmd_meta())
+ meta.add_ra_params(get_pe_meta())
+ meta.add_ra_params(get_cib_meta())
+ return meta
+
+
+@utils.memoize
+def get_properties_list():
+ try:
+ return list(get_properties_meta().params().keys())
+ except:
+ return []
+
+
+def prog_meta(prog):
+ '''
+ Do external program metadata.
+ '''
+ prog = utils.pacemaker_daemon(prog)
+ if prog:
+ rc, l = stdout2list("%s metadata" % prog)
+ if rc == 0:
+ return l
+ logger.debug("%s metadata exited with code %d", prog, rc)
+ return []
+
+
+def get_nodes_text(n, tag):
+ try:
+ return n.findtext(tag).strip()
+ except:
+ return ''
+
+
+def _param_type_default(n):
+ """
+ Helper function to get (type, default) from XML parameter node
+ """
+ try:
+ content = n.find("content")
+ return content.get("type"), content.get("default")
+ except:
+ return None, None
+
+
+class RAInfo(object):
+ '''
+ A resource agent and whatever's useful about it.
+ '''
+ ra_tab = " " # four horses
+ required_ops = ("start", "stop")
+ no_interval_ops = ("start", "stop", "promote", "demote")
+ skip_ops = ("meta-data", "validate-all")
+ skip_op_attr = ("name",)
+
+ def __init__(self, ra_class, ra_type, ra_provider="heartbeat", exclude_from_completion=None):
+ self.excluded_from_completion = exclude_from_completion or []
+ self.ra_class = ra_class
+ self.ra_type = ra_type
+ self.ra_provider = ra_provider
+ if ra_class == 'ocf' and not self.ra_provider:
+ self.ra_provider = "heartbeat"
+ self.ra_elem = None
+ self.broken_ra = False
+
+ def __str__(self):
+ return "%s:%s:%s" % (self.ra_class, self.ra_provider, self.ra_type) \
+ if self.ra_class == "ocf" \
+ else "%s:%s" % (self.ra_class, self.ra_type)
+
+ def error(self, s):
+ logger.error("%s: %s", self, s)
+
+ def warn(self, s):
+ logger.warning("%s: %s", self, s)
+
+ def info(self, s):
+ logger.info("%s: %s", self, s)
+
+ def debug(self, s):
+ logger.debug("%s: %s", self, s)
+
+ def add_ra_params(self, ra):
+ '''
+ Add parameters from another RAInfo instance.
+ '''
+ try:
+ if self.mk_ra_node() is None or ra.mk_ra_node() is None:
+ return
+ except:
+ return
+ try:
+ params_node = self.ra_elem.findall("parameters")[0]
+ except:
+ params_node = etree.SubElement(self.ra_elem, "parameters")
+ for n in ra.ra_elem.xpath("//parameters/parameter"):
+ params_node.append(copy.deepcopy(n))
+
+ def mk_ra_node(self):
+ '''
+ Return the resource_agent node.
+ '''
+ if self.ra_elem is not None:
+ return self.ra_elem
+ # don't try again in vain
+ if self.broken_ra:
+ return None
+ self.broken_ra = True
+ meta = self.meta()
+ if meta is None:
+ if not config.core.ignore_missing_metadata:
+ self.error("got no meta-data, does this RA exist?")
+ return None
+ self.ra_elem = meta
+ try:
+ assert self.ra_elem.tag == 'resource-agent'
+ except Exception:
+ self.error("meta-data contains no resource-agent element")
+ return None
+ if self.ra_class == "stonith":
+ self.add_ra_params(get_stonithd_meta())
+ self.broken_ra = False
+ return self.ra_elem
+
+ def params(self, completion=False):
+ '''
+ Construct a dict of dicts: parameters are keys and
+ dictionary of attributes/values are values. Cached too.
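+
+        Example return value (illustrative):
+            {'ip': {'required': '1', 'unique': '1',
+                    'type': 'string', 'default': None}, ...}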
+
+ completion:
+ If true, filter some (advanced) parameters out.
+ '''
+ if completion:
+ if self.mk_ra_node() is None:
+ return None
+ return [c.get("name")
+ for c in self.ra_elem.xpath("//parameters/parameter")
+ if c.get("name") and c.get("name") not in self.excluded_from_completion]
+ ident = "ra_params-%s" % self
+ if cache.is_cached(ident):
+ return cache.retrieve(ident)
+ if self.mk_ra_node() is None:
+ return None
+ d = {}
+ for c in self.ra_elem.xpath("//parameters/parameter"):
+ name = c.get("name")
+ if not name:
+ continue
+ required = c.get("required") if not (c.get("deprecated") or c.get("obsoletes")) else "0"
+ unique = c.get("unique")
+ typ, default = _param_type_default(c)
+ d[name] = {
+ "required": required,
+ "unique": unique,
+ "type": typ,
+ "default": default,
+ }
+ return cache.store(ident, d)
+
+ def actions(self):
+ '''
+ Construct a dict of dicts: actions are keys and
+ dictionary of attributes/values are values. Cached too.
+ '''
+ ident = "ra_actions-%s" % self
+ if cache.is_cached(ident):
+ return cache.retrieve(ident)
+ if self.mk_ra_node() is None:
+ return None
+
+ actions_dict = {}
+ actions_dict["monitor"] = []
+ for elem in self.ra_elem.xpath("//actions/action"):
+ name = elem.get("name")
+ if not name or name in self.skip_ops:
+ continue
+ d = {}
+ for key in list(elem.attrib.keys()):
+ if key in self.skip_op_attr:
+ continue
+ value = elem.get(key)
+ if value:
+ d[key] = value
+ if 'interval' not in d:
+ d['interval'] = '0s'
+ if name == "monitor":
+ actions_dict[name].append(d)
+ else:
+ actions_dict[name] = d
+
+ return cache.store(ident, actions_dict)
+
+ def param_default(self, pname):
+ '''
+ Parameter's default.
+ '''
+ d = self.params()
+ try:
+ return d[pname]["default"]
+ except:
+ return None
+
+ def normalize_parameters(self, root):
+ """
+ Find all instance_attributes/nvpair objects,
+ check if parameter exists. If not, normalize name
+ and check if THAT exists (replacing - with _).
+ If so, change the name of the parameter.
+ """
+ params = self.params()
+ if not params:
+ return
+ for nvp in root.xpath("instance_attributes/nvpair"):
+ name = nvp.get("name")
+ if name is not None and name not in params:
+ name = name.replace("-", "_")
+ if name in params:
+ nvp.attrib["name"] = name
+
+ def sanity_check_params(self, ident, nvpairs, existence_only=False):
+ '''
+ nvpairs is a list of <nvpair> tags.
+ - are all required parameters defined
+ - do all parameters exist
+ '''
+ def reqd_params_list():
+ '''
+ List of required parameters.
+ '''
+ d = self.params()
+ if not d:
+ return []
+ return [x for x in d if d[x]["required"] == '1']
+
+ def unreq_param(p):
+ '''
+ Allow for some exceptions.
+
+ - the rhcs stonith agents sometimes require "action" (in
+ the meta-data) and "port", but they're automatically
+ supplied by stonithd
+ '''
+ if self.ra_class == "stonith" and \
+ (self.ra_type.startswith("rhcs/") or
+ self.ra_type.startswith("fence_")):
+ if p in ("action", "port"):
+ return True
+ return False
+
+ rc = 0
+ d = {}
+ for nvp in nvpairs:
+ if 'name' in nvp.attrib:
+ d[nvp.get('name')] = nvp.get('value')
+ if not existence_only:
+ for p in reqd_params_list():
+ if unreq_param(p):
+ continue
+ if p not in d:
+ logger.error("%s: required parameter \"%s\" not defined", ident, p)
+ rc |= utils.get_check_rc()
+ for p in d:
+ if p.startswith("$"):
+ # these are special, non-RA parameters
+ continue
+ if p not in self.params():
+ logger.error("%s: parameter \"%s\" is not known", ident, p)
+ rc |= utils.get_check_rc()
+ return rc
+
+ def get_op_attr_value(self, op, key, role=None, depth=None):
+ """
+ Get operation's attribute value
+ Multi monitors should be distinguished by role or depth
+ """
+ try:
+ # actions_dict example:
+ # {'monitor': [
+ # {'depth': '0', 'timeout': '20s', 'interval': '10s', 'role': 'Promoted'},
+ # {'depth': '0', 'timeout': '20s', 'interval': '11s', 'role': 'Unpromoted'}
+ # ],
+ # 'start': {'timeout': '20s'},
+ # 'stop': {'timeout': '20s'}}
+ actions_dict = self.actions()
+ if not actions_dict:
+ return None
+ if op == 'monitor':
+ if role is None and depth is None:
+ return actions_dict[op][0][key]
+ if role:
+ for idx, monitor_item in enumerate(actions_dict[op]):
+ if monitor_item['role'] == role:
+ return actions_dict[op][idx][key]
+                # Technically, there could be multiple entries defining different depths for the same role.
+ if depth:
+ for idx, monitor_item in enumerate(actions_dict[op]):
+ if monitor_item['depth'] == depth:
+ return actions_dict[op][idx][key]
+ else:
+ return actions_dict[op][key]
+ except:
+ return None
+
+ def sanity_check_ops(self, ident, ops, default_timeout):
+ '''
+ ops is a list of operations
+ - do all operations exist
+ - are timeouts sensible
+ '''
+ def timeout_check(op, item_dict, adv_timeout):
+ """
+ Helper method used by sanity_check_op_timeout, to check operation's timeout
+ """
+ rc = 0
+ if "timeout" in item_dict:
+ actual_timeout = item_dict["timeout"]
+ timeout_string = "specified timeout"
+ else:
+ actual_timeout = default_timeout
+ timeout_string = "default timeout"
+ if actual_timeout and crm_time_cmp(adv_timeout, actual_timeout) > 0:
+ logger.warning("%s: %s %s for %s is smaller than the advised %s",
+ ident, timeout_string, actual_timeout, op, adv_timeout)
+ rc |= 1
+ return rc
+
+ def sanity_check_op_timeout(op, op_dict):
+ """
+ Helper method used by sanity_check_op, to check operation's timeout
+ """
+ rc = 0
+ role = None
+ depth = None
+ if op == "monitor":
+ for monitor_item in op_dict[op]:
+ role = monitor_item['role'] if 'role' in monitor_item else None
+ depth = monitor_item['depth'] if 'depth' in monitor_item else None
+ adv_timeout = self.get_op_attr_value(op, "timeout", role=role, depth=depth)
+ rc |= timeout_check(op, monitor_item, adv_timeout)
+ else:
+ adv_timeout = self.get_op_attr_value(op, "timeout")
+ rc |= timeout_check(op, op_dict[op], adv_timeout)
+ return rc
+
+ def sanity_check_op_interval(op, op_dict):
+ """
+ Helper method used by sanity_check_op, to check operation's interval
+ """
+ rc = 0
+ prev_intervals = []
+ if op == "monitor":
+ for monitor_item in op_dict[op]:
+                    role = monitor_item.get('role')
+                    depth = monitor_item.get('depth')
+ # make sure interval in multi monitor operations is unique and non-zero
+ adv_interval = self.get_op_attr_value(op, "interval", role=role, depth=depth)
+ actual_interval_msec = crm_msec(monitor_item["interval"])
+ if actual_interval_msec == 0:
+ logger.warning("%s: interval in monitor should be larger than 0, advised is %s", ident, adv_interval)
+ rc |= 1
+ elif actual_interval_msec in prev_intervals:
+ logger.warning("%s: interval in monitor must be unique, advised is %s", ident, adv_interval)
+ rc |= 1
+ else:
+ prev_intervals.append(actual_interval_msec)
+ elif "interval" in op_dict[op]:
+ value = op_dict[op]["interval"]
+ value_msec = crm_msec(value)
+ if op in self.no_interval_ops and value_msec != 0:
+ logger.warning("%s: Specified interval for %s is %s, it must be 0", ident, op, value)
+ rc |= 1
+ return rc
+
+ def sanity_check_op(op, op_dict):
+ """
+ Helper method used by sanity_check_ops.
+ """
+ rc = 0
+ if self.ra_class == "stonith" and op in ("start", "stop"):
+ return rc
+ if op not in self.actions():
+ logger.warning("%s: action '%s' not found in Resource Agent meta-data", ident, op)
+ rc |= 1
+ return rc
+ rc |= sanity_check_op_interval(op, op_dict)
+ rc |= sanity_check_op_timeout(op, op_dict)
+ return rc
+
+
+ rc = 0
+ op_dict = {}
+ op_dict["monitor"] = []
+ # ops example:
+ # [
+ # ['monitor', [['role', 'Promoted'], ['interval', '10s']]],
+ # ['monitor', [['role', 'Unpromoted'], ['interval', '0']]],
+ # ['start', [['timeout', '20s'], ['interval', '0']]]
+ # ]
+ for op in ops:
+ n_op = op[0]
+ d = {}
+ for key, value in op[1]:
+ if key in self.skip_op_attr:
+ continue
+ d[key] = value
+ if n_op == "monitor":
+ op_dict["monitor"].append(d)
+ else:
+ op_dict[n_op] = d
+ for req_op in self.required_ops:
+ if req_op not in op_dict:
+ if not (self.ra_class == "stonith" and req_op in ("start", "stop")):
+ op_dict[req_op] = {}
+ # op_dict example:
+ # {'monitor': [
+ # {'role': 'Promoted', 'interval': '10s'},
+ # {'role': 'Unpromoted', 'interval': '0'}],
+ # 'start': {'timeout': '20s', 'interval': '0'},
+ # 'stop': {}
+ # }
+ for op in op_dict:
+ rc |= sanity_check_op(op, op_dict)
+ return rc
+
+
+ def meta(self):
+ '''
+        Fetch the RA meta-data XML and return it as an etree object.
+        The result is cached.
+ '''
+ sid = "ra_meta-%s" % self
+ if cache.is_cached(sid):
+ return cache.retrieve(sid)
+        if self.ra_class in constants.meta_progs or self.ra_class in constants.meta_progs_20:
+            l = prog_meta(self.ra_class)
+ else:
+ l = ra_meta(self.ra_class, self.ra_type, self.ra_provider)
+ if not l:
+ return None
+ try:
+ xml = etree.fromstring('\n'.join(l))
+ except Exception:
+ self.error("Cannot parse meta-data XML")
+ return None
+ self.debug("read and cached meta-data")
+ return cache.store(sid, xml)
+
+ def meta_pretty(self):
+ '''
+ Print the RA meta-data in a human readable form.
+ '''
+ if self.mk_ra_node() is None:
+ return ''
+ l = []
+ title = self.meta_title()
+ l.append(title)
+ longdesc = get_nodes_text(self.ra_elem, "longdesc")
+ if longdesc:
+ l.append(longdesc)
+ if self.ra_class != "heartbeat":
+ params = self.meta_parameters()
+ if params:
+ l.append(params.rstrip())
+ actions = self.meta_actions()
+ if actions:
+ l.append(actions)
+ return '\n\n'.join(l)
+
+ def get_shortdesc(self, n):
+ name = n.get("name")
+ shortdesc = get_nodes_text(n, "shortdesc")
+ longdesc = get_nodes_text(n, "longdesc")
+ if shortdesc and shortdesc not in (name, longdesc, self.ra_type):
+ return shortdesc
+ return ''
+
+ def meta_title(self):
+ s = str(self)
+ shortdesc = self.get_shortdesc(self.ra_elem)
+ if shortdesc:
+ s = "%s (%s)" % (shortdesc, s)
+ return s
+
+ def format_parameter(self, n):
+ def meta_param_head():
+ name = n.get("name")
+ if not name:
+ return None
+ s = name
+ if n.get("required") == "1":
+ s = s + "*"
+ typ, default = _param_type_default(n)
+ if typ and default:
+ s = "%s (%s, [%s])" % (s, typ, default)
+ elif typ:
+ s = "%s (%s)" % (s, typ)
+ shortdesc = self.get_shortdesc(n)
+ s = "%s: %s" % (s, shortdesc)
+ return s
+ head = meta_param_head()
+ if not head:
+ self.error("no name attribute for parameter")
+ return ""
+ l = [head]
+ longdesc = get_nodes_text(n, "longdesc")
+ if longdesc:
+ l.append(self.ra_tab + longdesc.replace("\n", "\n" + self.ra_tab) + '\n')
+ return '\n'.join(l)
+
+ def meta_parameter(self, param):
+ if self.mk_ra_node() is None:
+ return ''
+ for c in self.ra_elem.xpath("//parameters/parameter"):
+ if c.get("name") == param:
+ return self.format_parameter(c)
+
+ def meta_parameters(self):
+ if self.mk_ra_node() is None:
+ return ''
+ l = []
+ for c in self.ra_elem.xpath("//parameters/parameter"):
+ s = self.format_parameter(c)
+ if s:
+ l.append(s)
+ if l:
+ return "Parameters (*: required, []: default):\n\n" + '\n'.join(l)
+
+ def meta_actions(self):
+ def meta_action_head(n):
+ name = n.get("name")
+ if not name or name in self.skip_ops:
+ return ''
+ s = "%-13s" % name
+ for a in list(n.attrib.keys()):
+ if a in self.skip_op_attr:
+ continue
+ v = n.get(a)
+ if v:
+ s = "%s %s=%s" % (s, a, v)
+ return s
+ l = []
+ for c in self.ra_elem.xpath("//actions/action"):
+ s = meta_action_head(c)
+ if s:
+ l.append(self.ra_tab + s)
+ if not l:
+ return None
+ return "Operations' defaults (advisory minimum):\n\n" + '\n'.join(l)
+
+
+def get_ra(r):
+ """
+ Argument is either an xml resource tag with class, provider and type attributes,
+ or a CLI style class:provider:type string.
+ """
+ if isinstance(r, str):
+ cls, provider, typ = disambiguate_ra_type(r)
+ else:
+ cls, provider, typ = r.get('class'), r.get('provider'), r.get('type')
+ # note order of arguments!
+ return RAInfo(cls, typ, provider)
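+# A usage sketch (the agent name is illustrative):
+#   get_ra("ocf:heartbeat:IPaddr2")  # -> RAInfo("ocf", "IPaddr2", "heartbeat")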
+
+
+#
+# resource type definition
+#
+def ra_type_validate(s, ra_class, provider, rsc_type):
+ '''
+ Only ocf ra class supports providers.
+ '''
+ if not rsc_type:
+ logger.error("bad resource type specification %s", s)
+ return False
+ if ra_class == "ocf":
+ if not provider:
+ logger.error("provider could not be determined for %s", s)
+ return False
+ else:
+ if provider:
+ logger.warning("ra class %s does not support providers", ra_class)
+ return True
+ return True
+
+
+def pick_provider(providers):
+ '''
+ Pick the most appropriate choice from a
+ list of providers, falling back to
+ 'heartbeat' if no good choice is found
+ '''
+ if not providers or 'heartbeat' in providers:
+ return 'heartbeat'
+ elif 'pacemaker' in providers:
+ return 'pacemaker'
+ return providers[0]
+
+
+def disambiguate_ra_type(s):
+ '''
+ Unravel [class:[provider:]]type
+ '''
+ l = s.split(':')
+ if not l or len(l) > 3:
+ return ["", "", ""]
+ if len(l) == 3:
+ return l
+ elif len(l) == 2:
+ cl, tp = l
+ else:
+ cl, tp = "ocf", l[0]
+ pr = pick_provider(ra_providers(tp, cl)) if cl == 'ocf' else ''
+ return cl, pr, tp
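+# Examples (agent names illustrative):
+#   disambiguate_ra_type("ocf:heartbeat:Dummy")  # ("ocf", "heartbeat", "Dummy")
+#   disambiguate_ra_type("systemd:sshd")         # ("systemd", "", "sshd")
+#   disambiguate_ra_type("Dummy")                # ("ocf", <picked provider>, "Dummy")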
+
+
+def can_validate_agent(agent):
+ if utils.getuser() != 'root':
+ return False
+ if isinstance(agent, str):
+ c, p, t = disambiguate_ra_type(agent)
+ if c != "ocf":
+ return False
+ agent = RAInfo(c, t, p)
+ if agent.mk_ra_node() is None:
+ return False
+ if len(agent.ra_elem.xpath('.//actions/action[@name="validate-all"]')) < 1:
+ return False
+ return True
+
+
+def validate_agent(agentname, params, log=False):
+ """
+ Call the validate-all action on the agent, given
+    the parameters in params.
+    agentname: either a class:provider:type agent name, or an RAInfo instance
+    params: a dict of agent parameters
+ Returns: (rc, out)
+ """
+ def find_agent():
+ if not can_validate_agent(agentname):
+ return None
+ if isinstance(agentname, str):
+ c, p, t = disambiguate_ra_type(agentname)
+ if c != "ocf":
+ raise ValueError("Only OCF agents are supported by this command")
+ agent = RAInfo(c, t, p)
+ if agent.mk_ra_node() is None:
+ return None
+ else:
+ agent = agentname
+ if len(agent.ra_elem.xpath('.//actions/action[@name="validate-all"]')) < 1:
+ raise ValueError("validate-all action not supported by agent")
+ return agent
+ agent = find_agent()
+ if agent is None:
+ return (-1, "")
+
+ my_env = os.environ.copy()
+ my_env["OCF_ROOT"] = config.path.ocf_root
+ for k, v in params.items():
+ my_env["OCF_RESKEY_" + k] = v
+ cmd = [os.path.join(config.path.ocf_root, "resource.d", agent.ra_provider, agent.ra_type), "validate-all"]
+ if options.regression_tests:
+ print(".EXT", " ".join(cmd))
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=my_env)
+ _out, _ = p.communicate()
+ out = to_ascii(_out)
+ p.wait()
+
+ if log is True:
+ for msg in out.splitlines():
+ if msg.startswith("ERROR: "):
+ logger.error(msg[7:])
+ elif msg.startswith("WARNING: "):
+ logger.warning(msg[9:])
+ elif msg.startswith("INFO: "):
+ logger.info(msg[6:])
+ elif msg.startswith("DEBUG: "):
+ logger.debug(msg[7:])
+ else:
+ logger.info(msg)
+ return p.returncode, out
+
+
+DLM_RA_SCRIPTS = """
+primitive {id} ocf:pacemaker:controld \
+op start timeout=90 \
+op stop timeout=100 \
+op monitor interval=60 timeout=60"""
+FILE_SYSTEM_RA_SCRIPTS = """
+primitive {id} ocf:heartbeat:Filesystem \
+params directory="{mnt_point}" fstype="{fs_type}" device="{device}" \
+op monitor interval=20 timeout=40 \
+op start timeout=60 \
+op stop timeout=60"""
+LVMLOCKD_RA_SCRIPTS = """
+primitive {id} ocf:heartbeat:lvmlockd \
+op start timeout=90 \
+op stop timeout=100 \
+op monitor interval=30 timeout=90"""
+LVMACTIVATE_RA_SCRIPTS = """
+primitive {id} ocf:heartbeat:LVM-activate \
+params vgname={vgname} vg_access_mode=lvmlockd activation_mode=shared \
+op start timeout=90s \
+op stop timeout=90s \
+op monitor interval=30s timeout=90s"""
+GROUP_SCRIPTS = """
+group {id} {ra_string}"""
+CLONE_SCRIPTS = """
+clone {id} {group_id} meta interleave=true"""
+
+
+CONFIGURE_RA_TEMPLATE_DICT = {
+ "DLM": DLM_RA_SCRIPTS,
+ "Filesystem": FILE_SYSTEM_RA_SCRIPTS,
+ "LVMLockd": LVMLOCKD_RA_SCRIPTS,
+ "LVMActivate": LVMACTIVATE_RA_SCRIPTS,
+ "GROUP": GROUP_SCRIPTS,
+ "CLONE": CLONE_SCRIPTS
+ }
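+# A formatting sketch (the "dlm" id is illustrative):
+#   CONFIGURE_RA_TEMPLATE_DICT["DLM"].format(id="dlm")
+# renders a crm configure snippet: "primitive dlm ocf:pacemaker:controld ...".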
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/report/__init__.py b/crmsh/report/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crmsh/report/__init__.py
diff --git a/crmsh/report/collect.py b/crmsh/report/collect.py
new file mode 100644
index 0000000..8192681
--- /dev/null
+++ b/crmsh/report/collect.py
@@ -0,0 +1,499 @@
+"""
+Define functions to collect logs and information.
+Functions whose names start with "collect_" are called in parallel.
+"""
+import sys
+import os
+import shutil
+import re
+import stat
+import pwd
+import datetime
+from subprocess import TimeoutExpired
+from typing import List
+
+import crmsh.user_of_host
+from crmsh import log, sh, corosync
+from crmsh import utils as crmutils
+from crmsh.report import constants, utils, core
+from crmsh.sh import ShellUtils
+from crmsh.service_manager import ServiceManager
+
+
+logger = log.setup_report_logger(__name__)
+
+
+def get_corosync_log() -> str:
+ """
+ Get the path of the corosync log file
+ """
+ corosync_log = ""
+ corosync_conf_path = corosync.conf()
+ if os.path.exists(corosync_conf_path):
+ corosync_log = corosync.get_value("logging.logfile")
+ else:
+ logger.warning(f"File {corosync_conf_path} does not exist")
+ return corosync_log
+
+
+def get_pcmk_log() -> str:
+ """
+ Get the path of the pacemaker log file
+ """
+ pcmk_log_candidates = [
+ "/var/log/pacemaker/pacemaker.log",
+ "/var/log/pacemaker.log"
+ ]
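+    # A PCMK_logfile setting in /etc/sysconfig/pacemaker (constants.PCMKCONF),
+    # e.g. "PCMK_logfile=/var/log/pacemaker/pacemaker.log", takes precedence.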
+
+ if os.path.isfile(constants.PCMKCONF):
+ data = crmutils.read_from_file(constants.PCMKCONF)
+ if data:
+ res = re.search(r'^ *PCMK_logfile *= *(.*)', data, re.M)
+ if res:
+ pcmk_log_candidates.insert(0, res.group(1))
+
+    for candidate in pcmk_log_candidates:  # avoid shadowing the imported "log" module
+        if os.path.isfile(candidate):
+            return candidate
+
+ logger.warning("No valid pacemaker log file found")
+ return ""
+
+
+def collect_ha_logs(context: core.Context) -> None:
+ """
+ Collect pacemaker, corosync and extra logs
+ """
+ log_list = [get_pcmk_log(), get_corosync_log()] + context.extra_log_list
+    for logfile in log_list:  # avoid shadowing the imported "log" module
+        if os.path.isfile(logfile):
+            utils.dump_logset(context, logfile)
+
+
+def collect_journal_logs(context: core.Context) -> None:
+ """
+ Collect journal logs from a specific time range
+ """
+ from_time_str = utils.ts_to_str(context.from_time)
+ to_time_str = utils.ts_to_str(context.to_time)
+ logger.debug2(f"Collect journal logs since: {from_time_str} until: {to_time_str}")
+
+ journal_target_dict = {
+ "default": constants.JOURNAL_F,
+ "pacemaker": constants.JOURNAL_PCMK_F,
+ "corosync": constants.JOURNAL_COROSYNC_F,
+ "sbd": constants.JOURNAL_SBD_F
+ }
+ for item, outf in journal_target_dict.items():
+ journalctl_unit = "" if item == "default" else f" -u {item}"
+ cmd = f'journalctl{journalctl_unit} -o short-iso-precise --since "{from_time_str}" --until "{to_time_str}" --no-pager | tail -n +2'
+ output = utils.get_cmd_output(cmd)
+ logger.debug2(f"Running command: {cmd}")
+ _file = os.path.join(context.work_dir, outf)
+ crmutils.str2file(output, _file)
+ logger.debug(f"Dump jounal log for {item} into {utils.real_path(_file)}")
+
+
+def dump_D_process() -> str:
+ """
+ Dump D-state process stack
+ """
+ out_string = ""
+
+ sh_utils_inst = ShellUtils()
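+    # In "ps aux" output column $8 is the process state; a state beginning
+    # with "D" means uninterruptible sleep, typically a process blocked on I/O.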
+ _, out, _ = sh_utils_inst.get_stdout_stderr("ps aux|awk '$8 ~ /^D/{print $2}'")
+ len_D_process = len(out.split('\n')) if out else 0
+ out_string += f"Dump D-state process stack: {len_D_process}\n"
+ if len_D_process == 0:
+ return out_string
+
+ for pid in out.split('\n'):
+ _, cmd_out, _ = sh_utils_inst.get_stdout_stderr(f"cat /proc/{pid}/comm")
+ out_string += f"pid: {pid} comm: {cmd_out}\n"
+ _, stack_out, _ = sh_utils_inst.get_stdout_stderr(f"cat /proc/{pid}/stack")
+ out_string += stack_out + "\n\n"
+
+ return out_string
+
+
+def lsof_ocfs2_device() -> str:
+ """
+ List open files for OCFS2 device
+ """
+ out_string = ""
+
+ sh_utils_inst = ShellUtils()
+ _, out, _ = sh_utils_inst.get_stdout_stderr("mount")
+ dev_list = re.findall("^(.*) on .* type ocfs2 ", out, re.MULTILINE)
+ for dev in dev_list:
+ cmd = f"lsof {dev}"
+ out_string += "\n\n#=====[ Command ] ==========================#\n"
+ out_string += f"# {cmd}\n"
+ _, cmd_out, _ = sh_utils_inst.get_stdout_stderr(cmd)
+ if cmd_out:
+ out_string += cmd_out
+
+ return out_string
+
+
+def ocfs2_commands_output() -> str:
+ """
+ Run ocfs2 related commands, return outputs
+ """
+ out_string = ""
+
+ cmds = [
+ "dmesg",
+ "ps -efL",
+ "lsblk -o 'NAME,KNAME,MAJ:MIN,FSTYPE,LABEL,RO,RM,MODEL,SIZE,OWNER,GROUP,MODE,ALIGNMENT,MIN-IO,OPT-IO,PHY-SEC,LOG-SEC,ROTA,SCHED,MOUNTPOINT'",
+ "mounted.ocfs2 -f",
+ "findmnt",
+ "mount",
+ "cat /sys/fs/ocfs2/cluster_stack"
+ ]
+ for cmd in cmds:
+ cmd_name = cmd.split()[0]
+ if not shutil.which(cmd_name):
+ continue
+ if cmd_name == "cat" and not os.path.exists(cmd.split()[1]):
+ continue
+ out_string += "\n\n#===== [ Command ] ==========================#\n"
+ out_string += f"# {cmd}\n"
+ out_string += utils.get_cmd_output(cmd)
+
+ return out_string
+
+
+def collect_ocfs2_info(context: core.Context) -> None:
+ """
+ Collects OCFS2 information
+ """
+ out_string = ""
+ rc, out, err = ShellUtils().get_stdout_stderr("mounted.ocfs2 -d")
+ if rc != 0:
+ out_string += f"Failed to run \"mounted.ocfs2 -d\": {err}"
+ # No ocfs2 device, just header line printed
+ elif len(out.split('\n')) == 1:
+ out_string += "No ocfs2 partitions found"
+ else:
+ out_string += dump_D_process()
+ out_string += lsof_ocfs2_device()
+ out_string += ocfs2_commands_output()
+
+ ocfs2_f = os.path.join(context.work_dir, constants.OCFS2_F)
+ logger.debug(f"Dump OCFS2 information into {utils.real_path(ocfs2_f)}")
+ crmutils.str2file(out_string, ocfs2_f)
+
+
+def collect_ratraces(context: core.Context) -> None:
+ """
+ Collect ra trace file from default /var/lib/heartbeat/trace_ra and custom one
+ """
+ # since the "trace_dir" attribute been removed from cib after untrace
+ # need to parse crmsh log file to extract custom trace ra log directory on each node
+ if crmsh.user_of_host.instance().use_ssh_agent():
+ shell = sh.ClusterShell(
+ sh.LocalShell(additional_environ={'SSH_AUTH_SOCK': os.environ.get('SSH_AUTH_SOCK', '')}),
+ crmsh.user_of_host.instance(),
+ forward_ssh_agent=True,
+ )
+ else:
+ shell = sh.cluster_shell()
+ log_contents = ""
+ cmd = f"grep 'INFO: Trace for .* is written to ' {log.CRMSH_LOG_FILE}*|grep -v 'collect'"
+ for node in context.node_list:
+ log_contents += shell.get_rc_stdout_stderr_without_input(node, cmd)[1] + "\n"
+ trace_dir_str = ' '.join(list(set(re.findall("written to (.*)/.*", log_contents))))
+ if not trace_dir_str:
+ return
+
+ logger.debug2("Looking for RA trace files in \"%s\"", trace_dir_str)
+ for f in utils.find_files_in_timespan(context, trace_dir_str.split()):
+ dest_dir = os.path.join(context.work_dir, '/'.join(f.split('/')[-3:-1]))
+ crmutils.mkdirp(dest_dir)
+ shutil.copy2(f, dest_dir)
+ logger.debug(f"Dump RA trace files into {utils.real_path(dest_dir)}")
+
+
+def collect_corosync_blackbox(context: core.Context) -> None:
+ fdata_list = []
+ for f in utils.find_files_in_timespan(context, ["/var/lib/corosync"]):
+ if re.search("fdata", f):
+ fdata_list.append(f)
+ if fdata_list:
+ blackbox_f = os.path.join(context.work_dir, constants.COROSYNC_RECORDER_F)
+ out_string = utils.get_cmd_output("corosync-blackbox")
+ crmutils.str2file(out_string, blackbox_f)
+ logger.debug(f"Dump corosync blackbox info into {utils.real_path(blackbox_f)}")
+
+
+def collect_dlm_info(context: core.Context) -> None:
+ """
+ Get DLM information
+ """
+ if shutil.which("dlm_tool"):
+        out_string = "##### NOTICE - Lockspace overview:\n"
+        out_string += utils.get_cmd_output("dlm_tool ls")
+        name_list = re.findall(r"^name\s*(.*)$", out_string, re.MULTILINE)
+
+ for name in name_list:
+ out_string += f"\n\n## NOTICE - Lockspace {name}\n"
+ lockdebug_cmd = f"dlm_tool lockdebug {name}"
+ out_string += utils.get_cmd_output(lockdebug_cmd)
+
+ out_string += "\n\n##### NOTICE - Lockspace history:\n"
+ out_string += utils.get_cmd_output("dlm_tool dump")
+
+ dlm_f = os.path.join(context.work_dir, constants.DLM_DUMP_F)
+ crmutils.str2file(out_string, dlm_f)
+ logger.debug(f"Dump DLM information into {utils.real_path(dlm_f)}")
+
+
+def collect_perms_state(context: core.Context) -> None:
+ """
+ Check and collect permissions and ownership information for specific directories
+ """
+ results = []
+
+ for check_dir in [context.pcmk_lib_dir, context.pe_dir, context.cib_dir]:
+ if not os.path.isdir(check_dir):
+ result = f"{check_dir} is not a directory or does not exist"
+ else:
+ stat_info = os.stat(check_dir)
+ pwd_inst = pwd.getpwnam('hacluster')
+ expected_uid = pwd_inst.pw_uid
+ expected_gid = pwd_inst.pw_gid
+ expected_mode = 0o750
+
+ uid_match = stat_info.st_uid == expected_uid
+ gid_match = stat_info.st_gid == expected_gid
+ mode_match = stat_info.st_mode & 0o7777 == expected_mode
+
+ if uid_match and gid_match and mode_match:
+ result = "OK"
+ else:
+ result = f"Permissions or ownership for {check_dir} are incorrect"
+ results.append(f"##### Check perms for {check_dir}: {result}\n")
+
+ perms_f = os.path.join(context.work_dir, constants.PERMISSIONS_F)
+ crmutils.str2file(''.join(results), perms_f)
+
+
+def dump_configurations(workdir: str) -> None:
+ config_list = constants.CONFIGURATIONS
+ config_list.append(corosync.conf())
+
+ for conf in config_list:
+ if os.path.isfile(conf):
+ shutil.copy2(conf, workdir)
+ elif os.path.isdir(conf):
+ shutil.copytree(conf, os.path.join(workdir, os.path.basename(conf)))
+
+
+def find_binary_path_for_core(core_file: str) -> str:
+ """
+ Find the binary that generated the given core file
+ """
+ path_str = ""
+ cmd = f"gdb --batch cat {core_file}"
+ _, out, _ = ShellUtils().get_stdout_stderr(cmd)
+ if out:
+ res = re.search("Core was generated by `(.*)'", out, re.M)
+ path_str = res.group(1) if res else ""
+
+ if path_str:
+ return f"Core {core_file} was generated by {path_str}"
+ else:
+ return f"Cannot find the program path for core {core_file}"
+
+
+def dump_core_info(workdir: str, core_file_list: List[str]) -> None:
+ """
+ Dump coredump files information into file
+ """
+ out_string = ""
+ if shutil.which("gdb"):
+ for core_file in core_file_list:
+ out_string += find_binary_path_for_core(core_file) + "\n"
+ out_string += "\nPlease utilize the gdb and debuginfo packages to obtain more detailed information locally"
+ else:
+ msg = "Please install gdb to get more info for coredump files"
+ out_string += msg
+ logger.warning(msg)
+
+ core_f = os.path.join(workdir, constants.COREDUMP_F)
+ crmutils.str2file(out_string, core_f)
+ logger.debug(f"Dump coredump info into {utils.real_path(core_f)}")
+
+
+def collect_coredump_info(context: core.Context) -> None:
+ """
+ Collects coredump files information from the library path of Pacemaker and Corosync
+ """
+ cores = utils.find_files_in_timespan(context, context.cores_dir_list)
+ flist = [f for f in cores if "core" in os.path.basename(f)]
+ if flist:
+ logger.warning(f"Found coredump file: {flist}")
+ dump_core_info(context.work_dir, flist)
+
+
+def dump_runtime_state(workdir: str) -> None:
+ """
+ Dump runtime state files
+ """
+ cluster_shell_inst = sh.cluster_shell()
+ for cmd, f, desc in [
+ ("crm_mon -1", constants.CRM_MON_F, "cluster state"),
+ ("cibadmin -Ql", constants.CIB_F, "CIB contents"),
+ ("crm_node -p", constants.MEMBERSHIP_F, "members of this partition")
+ ]:
+ out = cluster_shell_inst.get_stdout_or_raise_error(cmd)
+ target_f = os.path.join(workdir, f)
+ crmutils.str2file(out, target_f)
+ logger.debug(f"Dump {desc} into {utils.real_path(target_f)}")
+
+ node = crmutils.get_dc()
+ if node and node == crmutils.this_node():
+ crmutils.str2file("", os.path.join(workdir, "DC"))
+ logger.debug(f"Current DC is {node}; Touch file 'DC' in {utils.real_path(workdir)}")
+
+
+def consume_cib_in_workdir(workdir: str) -> None:
+ """
+ Generate 'crm configure show' and 'crm_verify' outputs based on the cib.xml file in the work directory
+ """
+ cib_in_workdir = os.path.join(workdir, constants.CIB_F)
+ if os.path.isfile(cib_in_workdir):
+ cluster_shell_inst = sh.cluster_shell()
+ cmd = f"CIB_file={cib_in_workdir} crm configure show"
+ out = cluster_shell_inst.get_stdout_or_raise_error(cmd)
+ crmutils.str2file(out, os.path.join(workdir, constants.CONFIGURE_SHOW_F))
+
+ cmd = f"crm_verify -V -x {cib_in_workdir}"
+ out = cluster_shell_inst.get_stdout_or_raise_error(cmd)
+ crmutils.str2file(out, os.path.join(workdir, constants.CRM_VERIFY_F))
+
+
+def collect_config(context: core.Context) -> None:
+ """
+ """
+ workdir = context.work_dir
+
+ if ServiceManager().service_is_active("pacemaker.service"):
+ dump_runtime_state(workdir)
+ crmutils.str2file("", os.path.join(workdir, "RUNNING"))
+ logger.debug(f"Touch file 'RUNNING' in {utils.real_path(workdir)}")
+ else:
+        # TODO: should determine whether the offline node was an HA node
+ shutil.copy2(os.path.join(context.cib_dir, constants.CIB_F), workdir)
+ crmutils.str2file("", os.path.join(workdir, "STOPPED"))
+ logger.debug(f"Touch file 'STOPPED' in {utils.real_path(workdir)}")
+
+ consume_cib_in_workdir(workdir)
+ dump_configurations(workdir)
+
+
+def pe_to_dot(pe_file: str) -> None:
+ dotf = os.path.splitext(pe_file)[0] + '.dot'
+ cmd = f"{constants.PTEST} -D {dotf} -x {pe_file}"
+ code, _, _ = ShellUtils().get_stdout_stderr(cmd)
+ if code != 0:
+ logger.warning("pe_to_dot: %s -> %s failed", pe_file, dotf)
+
+
+def collect_pe_inputs(context: core.Context) -> None:
+ """
+ Collects PE files in the specified directory and generates DOT files if needed
+ """
+ logger.debug2(f"Looking for PE files in {context.pe_dir}")
+
+ _list = utils.find_files_in_timespan(context, [context.pe_dir])
+ pe_file_list = [f for f in _list if not f.endswith(".last")]
+ if pe_file_list:
+ pe_flist_dir = os.path.join(context.work_dir, os.path.basename(context.pe_dir))
+ crmutils.mkdirp(pe_flist_dir)
+
+ gen_dot = len(pe_file_list) <= 20 and not context.speed_up
+ for f in pe_file_list:
+ pe_file_path_in_report = os.path.join(pe_flist_dir, os.path.basename(f))
+ os.symlink(f, pe_file_path_in_report)
+ if gen_dot:
+ pe_to_dot(pe_file_path_in_report)
+ logger.debug2(f"Found {len(pe_file_list)} PE files in {context.pe_dir}")
+ dump_path = f"{context.work_dir}/{os.path.basename(context.pe_dir)}"
+ logger.debug(f"Dump PE files into {utils.real_path(dump_path)}")
+ else:
+ logger.debug2("No PE file found for the giving time")
+
+
+def collect_sbd_info(context: core.Context) -> None:
+ """
+ Collect SBD config file and information
+ """
+ if not os.path.exists(constants.SBDCONF):
+ logger.debug(f"SBD config file {constants.SBDCONF} does not exist")
+ return
+ shutil.copy2(constants.SBDCONF, context.work_dir)
+ if not shutil.which("sbd"):
+ return
+
+ sbd_f = os.path.join(context.work_dir, constants.SBD_F)
+ cmd = ". {};export SBD_DEVICE;{};{}".format(constants.SBDCONF, "sbd dump", "sbd list")
+ with open(sbd_f, "w") as f:
+ f.write("\n\n#=====[ Command ] ==========================#\n")
+ f.write(f"# {cmd}\n")
+ f.write(utils.get_cmd_output(cmd))
+
+ logger.debug(f"Dump SBD config file into {utils.real_path(sbd_f)}")
+
+
+def collect_sys_stats(context: core.Context) -> None:
+ """
+ Collect system statistics
+ """
+ cmd_list = [
+ "hostname", "uptime", "ps axf", "ps auxw", "top -b -n 1",
+ "ip addr", "ip -s link", "ip n show", "lsscsi", "lspci",
+ "mount", "cat /proc/cpuinfo", "df"
+ ]
+
+ out_string = ""
+ for cmd in cmd_list:
+ out_string += f"##### Run \"{cmd}\" #####\n"
+ try:
+ out_string += utils.get_cmd_output(cmd, timeout=5) + "\n"
+ except TimeoutExpired:
+ logger.warning(f"Timeout while running command: {cmd}")
+
+ _file = os.path.join(context.work_dir, constants.SYSSTATS_F)
+ crmutils.str2file(out_string, _file)
+ logger.debug(f"Dump system statistics into {utils.real_path(_file)}")
+
+
+def collect_sys_info(context: core.Context) -> None:
+ """
+ Collect the versions of cluster-related packages and platform information
+ """
+ pkg_inst = utils.Package(constants.PACKAGES)
+ version_info = pkg_inst.version()
+ packages_info = "##### Installed cluster related packages #####\n"
+ packages_info += version_info + '\n\n'
+ if not context.speed_up:
+ packages_info += "##### Verification output of packages #####\n"
+ packages_info += pkg_inst.verify()
+
+ platform, _, release, _, arch = os.uname()
+ sys_info = (
+ f"##### System info #####\n"
+ f"Platform: {platform}\n"
+ f"Kernel release: {release}\n"
+ f"Architecture: {arch}\n"
+ )
+ if platform == "Linux":
+ sys_info += f"Distribution: {utils.get_distro_info()}\n"
+ out_string = f"{sys_info}\n{packages_info}"
+
+ _file = os.path.join(context.work_dir, constants.SYSINFO_F)
+ crmutils.str2file(out_string, _file)
+ logger.debug(f"Dump packages and platform info into {utils.real_path(_file)}")
diff --git a/crmsh/report/constants.py b/crmsh/report/constants.py
new file mode 100644
index 0000000..e80b5c7
--- /dev/null
+++ b/crmsh/report/constants.py
@@ -0,0 +1,102 @@
+# Copyright (C) 2017 Xin Liang <XLiang@suse.com>
+# See COPYING for license information.
+BIN_CRM = "/usr/sbin/crm"
+BIN_COLLECTOR = f"{BIN_CRM} report __collector"
+COMPRESS_DATA_FLAG = "COMPRESS CRM_REPORT DATA:::"
+LOG_PATTERNS = "CRIT: ERROR: error: warning: crit:"
+PTEST = "crm_simulate"
+SSH_OPTS = "-o StrictHostKeyChecking=no -o EscapeChar=none -o ConnectTimeout=15"
+CHECK_LOG_LINES = 10
+STAMP_TYPE = ""
+
+DESCRIPTION_TEMPLATE = """
+Please edit this template and describe the issue/problem you
+encountered. Then, post to
+ http://clusterlabs.org/mailman/listinfo/users
+or file a bug at
+ https://github.com/ClusterLabs/crmsh/issues
+
+Thank you.
+
+Date: {0}
+By: crm report {1}
+Subject: [short problem description]
+Severity: [choose one] enhancement minor normal major critical blocking
+--------------------------------------------------------
+
+Detailed description:
+
+"""
+
+PACKAGES = "booth cluster-glue cluster-glue-libs corosync corosync-qdevice corosync-qnetd corosync-testagents crmsh crmsh-scripts csync2 doxygen2man drbd-utils gfs2-kmp-default gfs2-utils hawk-apiserver ldirectord libcfg6 libcmap4 libcorosync_common4 libcpg4 libdlm libdlm3 libqb-tools libqb100 libquorum5 libsam4 libtotem_pg5 libvotequorum8 linstor linstor-common linstor-controller linstor-satellite monitoring-plugins-metadata o2locktop ocfs2-tools ocfs2-tools-o2cb omping pacemaker pacemaker-cli pacemaker-cts pacemaker-libs pacemaker-remote pacemaker-schemas patterns-ha pssh python-pssh python3-linstor python3-linstor-client python3-pacemaker python3-parallax resource-agents resource-agents-zfs ruby2.5-rubygem-sass-listen ruby2.5-rubygem-sass-listen-doc sbd"
+
+ANALYSIS_F = "analysis.txt"
+COREDUMP_F = "coredump_info.txt"
+CIB_F = "cib.xml"
+CONFIGURE_SHOW_F = "configure_show.txt"
+CONFIGURATIONS = [
+ "/etc/drbd.conf",
+ "/etc/drbd.d",
+ "/etc/booth/booth.conf"
+]
+COROSYNC_RECORDER_F = "fdata.txt"
+COROSYNC_F = "corosync.conf"
+CRM_MON_F = "crm_mon.txt"
+CRM_VERIFY_F = "crm_verify.txt"
+DESCRIPTION_F = "description.txt"
+DLM_DUMP_F = "dlm_dump.txt"
+JOURNAL_F = "journal.log"
+JOURNAL_PCMK_F = "journal_pacemaker.log"
+JOURNAL_COROSYNC_F = "journal_corosync.log"
+JOURNAL_SBD_F = "journal_sbd.log"
+MEMBERSHIP_F = "members.txt"
+PERMISSIONS_F = "permissions.txt"
+SBDCONF = "/etc/sysconfig/sbd"
+PCMKCONF = "/etc/sysconfig/pacemaker"
+SYSINFO_F = "sysinfo.txt"
+SYSSTATS_F = "sysstats.txt"
+TIME_F = "time.txt"
+OCFS2_F = "ocfs2.txt"
+SBD_F = "sbd.txt"
+OSRELEASE = "/etc/os-release"
+TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
+RESULT_TIME_SUFFIX = "%a-%d-%b-%Y"
+NAME = "crm report"
+COROSYNC_LIB = "/var/lib/corosync"
+
+DESCRIPTION_HELP = '''Description:
+crm report is a utility to collect all information (logs,
+configuration files, system information, etc) relevant to
+Pacemaker (CRM) over the given period of time.
+'''
+
+EXTRA_HELP = '''
+Examples
+ # collect from 2pm, today
+ {name} -f 2pm report_1
+
+ # collect from "2007/9/5 12:30" to "2007/9/5 14:00"
+ {name} -f "2007/9/5 12:30" -t "2007/9/5 14:00" report_2
+
+ # collect from 1:00 to 3:00, today; include /var/log/cluster/ha-debug as extra log
+ {name} -f 1:00 -t 3:00 -E /var/log/cluster/ha-debug report_3
+
+ # collect from "09sep07 2:00" and use 'hacluster' as ssh user
+ {name} -f "09sep07 2:00" -u hacluster report_4
+
+ # collect from 18:00, today; replace sensitive message like "usern.*" or "admin.*"
+ {name} -f 18:00 -s -p "usern.*" -p "admin.*" report_5
+
+  # collect from 1 month ago
+ {name} -f 1m
+
+ # collect from 75 hours ago
+ {name} -f 75H
+
+ # collect from 10 minutes ago
+ {name} -f 10M
+
+ # collect from 2 days ago to 1 day ago
+ {name} -f 2d -t 1d
+'''.format(name=NAME)
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/report/core.py b/crmsh/report/core.py
new file mode 100644
index 0000000..ffc6421
--- /dev/null
+++ b/crmsh/report/core.py
@@ -0,0 +1,510 @@
+#!/usr/bin/python3
+# Copyright (C) 2017 Xin Liang <XLiang@suse.com>
+# See COPYING for license information.
+
+import argparse
+import multiprocessing
+import os
+import re
+import sys
+import shutil
+import json
+import ast
+from inspect import getmembers, isfunction
+from typing import List
+
+from crmsh import utils as crmutils
+from crmsh import config, log, userdir, corosync, tmpfiles, ui_cluster, sh
+from crmsh.sh import ShellUtils
+
+
+logger = log.setup_report_logger(__name__)
+
+
+class Context:
+ """
+ Class to set/get essential attributes during the whole crm report process
+ """
+ def load(self) -> None:
+ """
+ Load default values
+ """
+ self.name = "crm_report"
+ self.from_time: float = config.report.from_time
+ self.to_time: float = utils.now()
+ self.no_compress: bool = not config.report.compress
+ self.speed_up: bool = config.report.speed_up
+ self.extra_log_list: List[str] = config.report.collect_extra_logs.split()
+ self.rm_exist_dest: bool = config.report.remove_exist_dest
+        self.single: bool = config.report.single_node
+ self.sensitive_regex_list: List[str] = []
+ self.regex_list: List[str] = "CRIT: ERROR: error: warning: crit:".split()
+ self.ssh_askpw_node_list: List[str] = []
+ self.me = crmutils.this_node()
+ self.pe_dir: str
+ self.cib_dir: str
+ self.pcmk_lib_dir: str
+ self.pcmk_exec_dir: str
+ self.cores_dir_list: List[str]
+ self.dest: str
+ self.dest_dir: str
+ self.work_dir: str
+ self.node_list: List[str]
+ self.ssh_user: str
+ self.ssh_option_list: List[str]
+ self.no_log_list: List[str]
+ self.sanitize: bool
+ self.debug: int
+ self.compress_prog: str
+ self.compress_suffix: str
+ self.main_node = self.me
+
+    def __str__(self) -> str:
+ return json.dumps(self.__dict__)
+
+ def __setattr__(self, name: str, value) -> None:
+ """
+ Set the attribute value and perform validations
+ """
+ if name in ["from_time", "to_time"] and value:
+ value = utils.parse_to_timestamp(value)
+ if name == "extra_log_list" and value and hasattr(self, "extra_log_list"):
+ value = list(set(self.extra_log_list) | set(value))
+ super().__setattr__(name, value)
+
+ def __setitem__(self, key: str, value) -> None:
+ self.__dict__[key] = value
+
+
+from crmsh.report import constants, utils, collect
+
+
+class CapitalizedHelpFormatter(argparse.HelpFormatter):
+ def add_usage(self, usage, actions, groups, prefix=None):
+ if prefix is None:
+ prefix = 'Usage: '
+ return super().add_usage(usage.capitalize(), actions, groups, prefix)
+
+ def start_section(self, heading):
+ return super().start_section(heading.capitalize())
+
+
+def add_arguments() -> argparse.Namespace:
+ parser = argparse.ArgumentParser(
+ usage=f"\n{constants.NAME} [options] [dest]",
+ add_help=False,
+ formatter_class=lambda prog: CapitalizedHelpFormatter(prog, width=80)
+ )
+ parser.add_argument("-h", "--help", action="store_true", dest="help",
+ help="Show this help message and exit")
+ parser.add_argument('-f', dest='from_time', metavar='FROM_TIME',
+ help='Time to start from (default: 12 hours ago), can be specific time or delta time until now')
+ parser.add_argument('-t', dest='to_time', metavar='TO_TIME',
+ help='Time to finish at (default: now); can be specific time or delta time until now')
+ parser.add_argument('-d', dest='no_compress', action='store_true',
+ help="Don't compress, but leave result in a directory")
+ parser.add_argument('-n', dest='node_list', metavar='NODE', action=ui_cluster.CustomAppendAction, default=[],
+ help='Node names for this cluster; this option is additive (use -n a -n b or -n "a b")')
+ parser.add_argument('-u', dest='ssh_user', metavar='SSH_USER',
+ help='SSH user to access other nodes')
+ parser.add_argument('-X', dest='ssh_option_list', metavar='SSH_OPTION', action=ui_cluster.CustomAppendAction, default=[],
+ help='Extra ssh(1) options; this option is additive')
+ parser.add_argument('-E', dest='extra_log_list', metavar='FILE', action=ui_cluster.CustomAppendAction, default=[],
+ help='Extra logs to collect; this option is additive')
+ parser.add_argument('-e', dest='no_log_list', metavar='FILE', action=ui_cluster.CustomAppendAction, default=[],
+ help='Don\'t collect these files; this option is additive')
+ parser.add_argument('-s', dest='sanitize', action='store_true',
+ help='Replace sensitive info in PE or CIB or pacemaker log files')
+ parser.add_argument('-p', dest='sensitive_regex_list', metavar='PATT', action=ui_cluster.CustomAppendAction, default=[],
+ help='Regular expression to match variables containing sensitive data (default: passw.*); this option is additive')
+ parser.add_argument('-Q', dest='speed_up', action='store_true',
+ help="The quick mode, which skips producing dot files from PE inputs, verifying installed cluster stack rpms and sanitizing files for sensitive information")
+ parser.add_argument('-Z', dest='rm_exist_dest', action='store_true',
+ help='If destination directories exist, remove them instead of exiting')
+ parser.add_argument('-S', dest='single', action='store_true',
+ help="Single node operation; don't try to start report collectors on other nodes")
+ parser.add_argument('-v', dest='debug', action='count', default=0,
+ help='Increase verbosity')
+ parser.add_argument('dest', nargs='?',
+ help="Report name (which may include the path for storing the report), default format is 'crm_report-current_date,' such as 'crm_report-Mon-09-Oct-2023'")
+
+ args = parser.parse_args()
+ if args.help:
+ print(constants.DESCRIPTION_HELP)
+ parser.print_help()
+ print(constants.EXTRA_HELP)
+ sys.exit(0)
+
+ return args
+
+
+def push_data(context: Context) -> None:
+ """
+ Push data from this node
+ """
+ logger.debug2(f"Pushing data from {context.me}:{context.work_dir} to {context.main_node}")
+ cmd = f'cd {context.work_dir}/.. && tar -h -c {context.me}'
+ _, out, err = ShellUtils().get_stdout_stderr(cmd, raw=True)
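+    # With raw=True, "out" is raw bytes; printing it yields a b'...' literal,
+    # which start_collector() on the main node turns back into bytes via
+    # ast.literal_eval() before extracting the tarball.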
+ if out:
+ print(f"{constants.COMPRESS_DATA_FLAG}{out}")
+ if err:
+ raise utils.ReportGenericError(crmutils.to_ascii(err))
+
+
+def pick_compress_prog(context: Context) -> None:
+ """
+ Pick the appropriate compression program and its file suffix
+ """
+ context.compress_prog, context.compress_suffix = pick_first_compress()
+ if not context.compress_prog:
+ context.compress_prog, context.compress_suffix = "cat", ""
+
+
+def pick_first_compress():
+ compress_prog_suffix_dict = {
+ "gzip": ".gz",
+ "bzip2": ".bz2",
+ "xz": ".xz"
+ }
+ for cmd, suffix in compress_prog_suffix_dict.items():
+ if shutil.which(cmd):
+ return cmd, suffix
+ logger.warning("Could not find a compression program")
+ return None, None
+
+
+def finalword(context: Context) -> None:
+ logger.info(f"The report is saved in {context.dest_path}")
+ timespan_str = utils.get_timespan_str(context)
+ logger.info(f"Report timespan: {timespan_str}")
+ nodes_str = ' '.join(context.node_list)
+ logger.info(f"Including nodes: {nodes_str}")
+ logger.info("Thank you for taking time to create this report")
+
+
+def process_results(context: Context) -> None:
+ """
+ Process report results
+ """
+ if not context.speed_up:
+ utils.do_sanitize(context)
+ utils.analyze(context)
+ utils.create_description_template(context)
+
+ if context.no_compress:
+ shutil.move(context.work_dir, context.dest_dir)
+ else:
+ cmd_cd_tar = f"(cd {context.work_dir}/.. && tar cf - {context.dest})"
+ cmd_compress = f"{context.compress_prog} > {context.dest_dir}/{context.dest}.tar{context.compress_suffix}"
+ cmd = f"{cmd_cd_tar}|{cmd_compress}"
+ logger.debug2(f"Running: {cmd}")
+ sh.cluster_shell().get_stdout_or_raise_error(cmd)
+
+ finalword(context)
+
+
+def collect_logs_and_info(context: Context) -> None:
+ """
+ Collect logs and information using multiprocessing
+ """
+ # Make sure not to occupy all CPUs
+ pool = multiprocessing.Pool(round(0.8 * multiprocessing.cpu_count()))
+    # Store the AsyncResult objects returned by apply_async: calling get() on
+    # them re-raises exceptions (NameError, AttributeError, etc.) from the
+    # child processes, which the parent would otherwise never see. Since get()
+    # blocks until the child finishes, append the results to a list first and
+    # inspect them only after the pool has been joined.
+ result_list = []
+ # Generate function list from collect.py
+ for cf in [f for f, _ in getmembers(collect, isfunction) if f.startswith("collect_")]:
+ result = pool.apply_async(getattr(collect, cf), (context,))
+ result_list.append(result)
+ pool.close()
+ pool.join()
+
+ for result in result_list:
+ try:
+ result.get()
+        except Exception:
+ utils.print_traceback()
+
+
+def collect_for_nodes(context: Context) -> None:
+ """
+ Start collectors on each node
+ """
+ process_list = []
+ for node in context.node_list:
+ if node in context.ssh_askpw_node_list:
+ node_str = f"{context.ssh_user}@{node}" if context.ssh_user else node
+ logger.info(f"Please provide password for {node_str}")
+ start_collector(node, context)
+ else:
+ p = multiprocessing.Process(target=start_collector, args=(node, context))
+ p.start()
+ process_list.append(p)
+ for p in process_list:
+ p.join()
+
+
+def start_collector(node: str, context: Context) -> None:
+ """
+ Start collector at specific node
+ """
+ cmd = f"{constants.BIN_COLLECTOR} '{context}'"
+ err = ""
+
+ if node == context.me:
+ code, out, err = ShellUtils().get_stdout_stderr(cmd)
+ else:
+ node = f"{context.ssh_user}@{node}" if context.ssh_user else node
+ cmd = cmd.replace('"', '\\"')
+ cmd = f'{crmutils.get_ssh_agent_str()} ssh {constants.SSH_OPTS} {node} "{context.sudo} {cmd}"'
+ code, out, err = sh.LocalShell().get_rc_stdout_stderr(context.ssh_user, cmd)
+
+ if code != 0:
+ logger.warning(err)
+ # ERROR/WARNING/DEBUG messages
+ if err:
+ print(err, file=sys.stderr)
+ if out == '':
+ return
+
+ compress_data = ""
+ for data in out.split("\n"):
+ if data.startswith(constants.COMPRESS_DATA_FLAG):
+ # crm report data from collector
+            # use slicing rather than lstrip(): lstrip() strips a set of
+            # characters and could eat leading bytes of the payload itself
+            compress_data = data[len(constants.COMPRESS_DATA_FLAG):]
+ else:
+ # INFO log data from collector
+ print(data)
+
+ try:
+ # Safely evaluate the string representation of a tarball from push_data
+ data_object = ast.literal_eval(compress_data)
+ except (SyntaxError, ValueError) as e:
+ logger.error(f"Error evaluating data: {e}")
+ return
+
+ # Extract the tarball in the specified working directory
+ cmd = f"cd {context.work_dir} && tar x"
+ ShellUtils().get_stdout(cmd, input_s=data_object)
+
+
+def process_dest(context: Context) -> None:
+ """
+ Process destination path and file
+ """
+ if not context.dest:
+ suffix = utils.now(constants.RESULT_TIME_SUFFIX)
+ context.dest = f"{context.name}-{suffix}"
+
+ dest_dir = os.path.dirname(context.dest) or "."
+ if not os.path.isdir(dest_dir):
+ raise utils.ReportGenericError(f"Directory {dest_dir} does not exist")
+ context.dest_dir = dest_dir
+
+ dest_file = os.path.basename(context.dest)
+ if not crmutils.is_filename_sane(dest_file):
+ raise utils.ReportGenericError(f"{dest_file} is invalid file name")
+
+ if context.no_compress and os.path.isdir(context.dest):
+ if context.rm_exist_dest:
+ shutil.rmtree(context.dest)
+ else:
+ raise utils.ReportGenericError(f"Destination directory {context.dest} exists, please cleanup or use -Z option")
+
+ context.dest = dest_file
+ pick_compress_prog(context)
+ if context.no_compress:
+ context.dest_path = f"{context.dest_dir}/{context.dest}"
+ else:
+ context.dest_path = f"{context.dest_dir}/{context.dest}.tar{context.compress_suffix}"
+
+
+def process_node_list(context: Context) -> None:
+ if not context.node_list:
+ context.node_list = crmutils.list_cluster_nodes()
+ if not context.node_list:
+ raise utils.ReportGenericError("Could not figure out a list of nodes; is this a cluster node?")
+ if context.single:
+ context.node_list = [context.me]
+
+ for node in context.node_list[:]:
+ if node == context.me:
+ continue
+ try:
+ crmutils.ping_node(node)
+ except Exception as err:
+ logger.error(str(err))
+ context.node_list.remove(node)
+
+
+def process_arguments(context: Context) -> None:
+ if context.to_time <= context.from_time:
+ raise ValueError("The start time must be before the finish time")
+ process_node_list(context)
+ process_dest(context)
+
+
+def setup_workdir(context: Context) -> None:
+ """
+    Set up the working directory into which crm report puts all collected logs and data
+ """
+ tmpdir = tmpfiles.create_dir()
+ if not is_collector():
+ context.work_dir = os.path.join(tmpdir, os.path.basename(context.dest))
+ else:
+ context.work_dir = os.path.join(tmpdir,
+ os.path.basename(context.dest),
+ context.me)
+ crmutils.mkdirp(context.work_dir)
+ logger.debug2(f"Setup work directory in {context.work_dir}")
+
+
+def load_context(context: Context) -> None:
+ """
+ Load context attributes from master process
+ """
+ for key, value in json.loads(sys.argv[2]).items():
+ context[key] = value
+ context.me = crmutils.this_node()
+ adjust_verbosity(context)
+ logger.debug2(f"Loading context from collector: {context}")
+
+
+def find_ssh_user(context: Context) -> None:
+ """
+ Finds the SSH user for passwordless SSH access to nodes in the context's node_list
+ """
+ ssh_user = ""
+ user_try_list = [
+ context.ssh_user,
+ userdir.get_sudoer(),
+ userdir.getuser()
+ ]
+
+ for n in context.node_list:
+ if n == context.me:
+ continue
+ rc = False
+ for u in user_try_list:
+ if not u:
+ continue
+ ssh_str = f"{u}@{n}"
+ if not crmutils.check_ssh_passwd_need(u, u, n):
+ logger.debug(f"ssh {ssh_str} OK")
+ ssh_user = u
+ rc = True
+ break
+ else:
+ logger.debug(f"ssh {ssh_str} failed")
+ if not rc:
+ context.ssh_askpw_node_list.append(n)
+ if context.ssh_askpw_node_list:
+ logger.warning(f"passwordless ssh to node(s) {context.ssh_askpw_node_list} does not work")
+
+ context.sudo = "" if ssh_user in ("root", "hacluster") else "sudo"
+ context.ssh_user = ssh_user or ""
+ logger.debug2(f"context.ssh_user is {context.ssh_user}")
+
+
+def load_from_crmsh_config(context: Context) -> None:
+ """
+    Load context attributes from crmsh.config
+ """
+ config_context_map = {
+ "crm_config": "cib_dir",
+ "crm_daemon_dir": "pcmk_exec_dir",
+ "pe_state_dir": "pe_dir"
+ }
+ context_str_map = {
+ "cib_dir": "CIB",
+ "pcmk_exec_dir": "Pacemaker exec",
+ "pe_dir": "PE"
+ }
+ for config_item, context_attr in config_context_map.items():
+ value = getattr(config.path, config_item, None)
+ if not value or not os.path.isdir(value):
+ raise utils.ReportGenericError(f"Cannot find {context_str_map[context_attr]} directory")
+ setattr(context, context_attr, value)
+
+
+def load_context_attributes(context: Context) -> None:
+ """
+    Load context attributes from crmsh.config and derive the library/coredump paths
+ """
+ load_from_crmsh_config(context)
+
+ context.pcmk_lib_dir = os.path.dirname(context.cib_dir)
+ context.cores_dir_list = [os.path.join(context.pcmk_lib_dir, "cores")]
+ context.cores_dir_list.extend([constants.COROSYNC_LIB] if os.path.isdir(constants.COROSYNC_LIB) else [])
+
+
+def adjust_verbosity(context: Context) -> None:
+ if context.debug > 0:
+ config.report.verbosity = context.debug
+ elif config.core.debug:
+ config.report.verbosity = 1
+ context.debug = 1
+
+
+def parse_arguments(context: Context) -> None:
+ """
+ Add, parse and process arguments
+ """
+ args = add_arguments()
+ crmutils.check_empty_option_value(args)
+ for arg in vars(args):
+ value = getattr(args, arg)
+ if value or not hasattr(context, arg):
+ setattr(context, arg, value)
+ adjust_verbosity(context)
+ process_arguments(context)
+
+
+def is_collector() -> bool:
+ """
+    Check whether this process was invoked as the per-node "__collector" subcommand
+ """
+ return len(sys.argv) > 1 and sys.argv[1] == "__collector"
+
+
+def run_impl() -> None:
+ """
+ Major work flow
+ """
+ ctx = Context()
+
+ if is_collector():
+ load_context(ctx)
+ else:
+ ctx.load()
+ parse_arguments(ctx)
+ load_context_attributes(ctx)
+
+ setup_workdir(ctx)
+
+ if is_collector():
+ collect_logs_and_info(ctx)
+ push_data(ctx)
+ else:
+ find_ssh_user(ctx)
+ collect_for_nodes(ctx)
+ process_results(ctx)
+
+
+def run() -> None:
+ """
+ crm report entry
+ """
+ try:
+ run_impl()
+ except UnicodeDecodeError:
+ utils.print_traceback()
+ sys.exit(1)
+ except utils.ReportGenericError as err:
+ if str(err):
+ logger.error(str(err))
+ sys.exit(1)
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/report/utils.py b/crmsh/report/utils.py
new file mode 100644
index 0000000..9cc3743
--- /dev/null
+++ b/crmsh/report/utils.py
@@ -0,0 +1,757 @@
+# Copyright (C) 2017 Xin Liang <XLiang@suse.com>
+# See COPYING for license information.
+
+import datetime
+import glob
+import os
+import re
+import shutil
+import sys
+import traceback
+from dateutil import tz
+from enum import Enum
+from typing import Optional, List, Tuple
+
+from crmsh import utils as crmutils
+from crmsh import corosync, log, userdir, tmpfiles, config, sh
+from crmsh.report import constants, collect, core
+from crmsh.sh import ShellUtils
+
+
+logger = log.setup_report_logger(__name__)
+
+
+class LogType(Enum):
+ GOOD = 0 # good log; include
+ IRREGULAR = 1 # irregular log; include
+ EMPTY = 2 # empty log; exclude
+ BEFORE_TIMESPAN = 3 # log before timespan; exclude
+ AFTER_TIMESPAN = 4 # log after timespan; exclude
+
+
+class ReportGenericError(Exception):
+ pass
+
+
+def arch_logs(context: core.Context, logf: str) -> Tuple[List[str], LogType]:
+ """
+ Go through archived logs and return those in timespan and the LogType
+ """
+ return_list = []
+ log_type = None
+
+ file_list = [logf] + glob.glob(logf+"*[0-9z]")
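+    # Also pick up rotated siblings, e.g. "<logf>-20230905" or "<logf>.1.gz"
+    # (the glob matches names ending in a digit or "z"); names illustrative.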
+ # like ls -t, newest first
+ for f in sorted(file_list, key=os.path.getmtime, reverse=True):
+ tmp = is_our_log(context, f)
+ if tmp not in (LogType.GOOD, LogType.IRREGULAR):
+ continue
+ log_type = tmp
+ return_list.append(f)
+
+ if return_list:
+ logger.debug2(f"Found logs {return_list} in {get_timespan_str(context)}")
+ return return_list, log_type
+
+
+def analyze(context: core.Context) -> None:
+ """
+ """
+ result_list = []
+
+ result_list.append(compare_and_consolidate_files(context))
+ result_list += check_collected_files(context)
+ result_list += extract_critical_log(context)
+
+ analyze_f = os.path.join(context.work_dir, constants.ANALYSIS_F)
+ crmutils.str2file('\n'.join(result_list), analyze_f)
+
+
+def compare_and_consolidate_files(context: core.Context) -> str:
+ out_string: str = ""
+ workdir = context.work_dir
+ compare_file_list = [
+ constants.MEMBERSHIP_F,
+ constants.CRM_MON_F,
+ constants.COROSYNC_F,
+ constants.SYSINFO_F,
+ constants.CIB_F
+ ]
+
+ for f in compare_file_list:
+ out_string += f"Diff {f}... "
+ if not glob.glob(f"{workdir}/*/{f}"):
+ out_string += f"no {f} found in {workdir}\n"
+ continue
+ rc, out = do_compare(context, f)
+ out_string += f"\n{out}\n" if out else "OK\n"
+ if rc == 0 and f != constants.CIB_F:
+ consolidate(context, f)
+
+ return out_string + '\n'
+
+
+def do_compare(context: core.Context, file: str) -> Tuple[int, str]:
+ """
+ Compare file content between cluster nodes
+ """
+ rc, out_string = 0, ""
+ prev_file_path = None
+
+ for n in context.node_list:
+ current_file_path = os.path.join(context.work_dir, n, file)
+
+ if prev_file_path:
+            # accumulate per-node return codes instead of overwriting them
+            rc_, out = diff_check(prev_file_path, current_file_path)
+            out_string += f"{out}\n" if out else ""
+            rc += rc_
+ else:
+ prev_file_path = current_file_path
+
+ return rc, out_string
+
+
+def check_collected_files(context: core.Context) -> List[str]:
+ """
+ Check collected files for warnings and issues
+ """
+ results = []
+ file_description_dict = {
+ constants.COREDUMP_F: "WARN: Coredump found at",
+ constants.CRM_VERIFY_F: "WARN: crm_verify reported warnings at",
+ constants.PERMISSIONS_F: "Checking problems with permissions/ownership at"
+ }
+
+ for node in context.node_list:
+ for f, desc in file_description_dict.items():
+ f_in_work_dir = os.path.join(context.work_dir, node, f)
+ if os.path.isfile(f_in_work_dir) and not crmutils.file_is_empty(f_in_work_dir):
+ results.append(f"{desc} {node}:")
+ results.append(crmutils.read_from_file(f_in_work_dir))
+
+ return results
+
+
+def extract_critical_log(context: core.Context) -> List[str]:
+ """
+ Extract warnings and errors from collected log files
+ """
+ result_list = []
+ log_pattern_list = [f".*{p}.*" for p in constants.LOG_PATTERNS.split()]
+ log_pattern_str = '|'.join(log_pattern_list)
+
+ for f in glob.glob(f"{context.work_dir}/*/*.log"):
+ _list = re.findall(log_pattern_str, crmutils.read_from_file(f))
+ if _list:
+ result_list.append(f"\nWARNINGS or ERRORS in {'/'.join(f.split('/')[3:])}:")
+ result_list.extend(_list)
+
+ return result_list
+
+
+def cib_diff(file1: str, file2: str) -> Tuple[int, str]:
+ """
+ check if CIB files have same content in the cluster
+ """
+ node1_dir = os.path.dirname(file1)
+ node2_dir = os.path.dirname(file2)
+
+ if (os.path.isfile(os.path.join(node1_dir, "RUNNING")) and
+ os.path.isfile(os.path.join(node2_dir, "RUNNING"))) or \
+ (os.path.isfile(os.path.join(node1_dir, "STOPPED")) and
+ os.path.isfile(os.path.join(node2_dir, "STOPPED"))):
+ cmd = f"crm_diff -c -n {file1} -o {file2}"
+ code, out_string, _ = ShellUtils().get_stdout_stderr(cmd)
+ else:
+ code, out_string = 1, "Can't compare cibs from running and stopped systems\n"
+ return code, out_string
+
+
+def consolidate(context: core.Context, target_file: str) -> None:
+ """
+ Remove duplicates if files are same, make links instead
+ """
+ workdir = context.work_dir
+ for node in context.node_list:
+ target_file_in_path = os.path.join(workdir, node, target_file)
+ if os.path.isfile(os.path.join(workdir, target_file)):
+ os.remove(target_file_in_path)
+ else:
+ shutil.move(target_file_in_path, workdir)
+ os.symlink(f"../{target_file}", target_file_in_path)
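+# After consolidation, each node directory keeps a symlink to the shared copy,
+# e.g. (illustrative layout): <workdir>/node1/members.txt -> ../members.txt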
+
+
+def diff_check(file1: str, file2: str) -> Tuple[int, str]:
+ """
+ Check the differences between two files
+ """
+ for f in [file1, file2]:
+ if not os.path.isfile(f):
+ return (1, f"{f} does not exist\n")
+
+ diff_func = cib_diff if os.path.basename(file1) == constants.CIB_F else txt_diff
+ return diff_func(file1, file2)
+
+
+def get_distro_info() -> str:
+ """
+ Get distribution information
+ """
+ res = None
+ if os.path.exists(constants.OSRELEASE):
+ logger.debug2(f"Using {constants.OSRELEASE} to get distribution info")
+ res = re.search("PRETTY_NAME=\"(.*)\"", crmutils.read_from_file(constants.OSRELEASE))
+ elif shutil.which("lsb_release"):
+ logger.debug2("Using lsb_release to get distribution info")
+ out = sh.LocalShell().get_stdout_or_raise_error("lsb_release -d")
+ res = re.search("Description:\s+(.*)", out)
+ return res.group(1) if res else "Unknown"
+
+
+def dump_logset(context: core.Context, logf: str) -> None:
+ """
+ Dump the log set into the specified output file
+ """
+ logf_set, logf_type = arch_logs(context, logf)
+ if not logf_set:
+ logger.debug2(f"{logf} is not in timespan {get_timespan_str(context)}")
+ return
+
+ out_string = ""
+
+ if logf_type == LogType.IRREGULAR:
+ for f in logf_set:
+ out_string += print_logseg(f, 0, 0)
+ else:
+ newest, oldest = logf_set[0], logf_set[-1]
+ middle_set = logf_set[1:-1]
+
+ if len(logf_set) == 1:
+ out_string += print_logseg(newest, context.from_time, context.to_time)
+ else:
+ out_string += print_logseg(oldest, context.from_time, 0)
+ for f in middle_set:
+ out_string += print_logseg(f, 0, 0)
+ out_string += print_logseg(newest, 0, context.to_time)
+
+ if out_string:
+ outf = os.path.join(context.work_dir, os.path.basename(logf))
+ crmutils.str2file(out_string.strip('\n'), outf)
+ logger.debug(f"Dump {logf} into {real_path(outf)}")
+
+
+def find_files_in_timespan(context: core.Context, target_dir_list: List[str]) -> List[str]:
+ """
+ Get a list of files in the target directories with creation time in the timespan
+ """
+ file_list = []
+
+ for target_dir in target_dir_list:
+ if not os.path.isdir(target_dir):
+ continue
+
+ for root, dirs, files in os.walk(target_dir):
+ for file in files:
+ file_path = os.path.join(root, file)
+ file_stat = os.stat(file_path)
+ if context.from_time <= file_stat.st_ctime <= context.to_time:
+ file_list.append(file_path)
+
+ return file_list
+
+
+def find_first_timestamp(data: List[str], log_file: str) -> Optional[float]:
+    """
+    Find the first timestamp in the given list of log lines
+ """
+ for line in data:
+ timestamp = get_timestamp(line, log_file)
+ if timestamp:
+ return timestamp
+ return None
+
+
+def filter_lines(data: str, from_line: int, to_line: int) -> str:
+ """
+ Filter lines from the given data based on the specified line range.
+ """
+ lines = data.split('\n')
+ filtered_lines = [
+ line + '\n'
+ for count, line in enumerate(lines, start=1)
+ if from_line <= count <= to_line
+ ]
+ return ''.join(filtered_lines)
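+# Example: filter_lines("a\nb\nc", 2, 3) returns "b\nc\n" (lines 2..3 inclusive).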
+
+
+def determine_log_format(data: str) -> Optional[str]:
+ """
+ Determines the log format based on the given log line
+ """
+ for line in head(constants.CHECK_LOG_LINES, data):
+ _list = line.split()
+ if not _list:
+ continue
+ # syslog format:
+ # Feb 12 18:30:08 15sp1-1 kernel: e820: BIOS-provided physical RAM map:
+ if len(_list) >= 3 and crmutils.parse_time(' '.join(_list[0:3]), quiet=True):
+ return "syslog"
+ # rfc5424 format:
+ # 2003-10-11T22:14:15.003Z mymachine.example.com su
+ if crmutils.parse_time(_list[0], quiet=True):
+ return "rfc5424"
+ if len(_list) > 1 and crmutils.parse_time(_list[1], quiet=True):
+ return "legacy"
+ return None
+
+
+def findln_by_timestamp(data: str, given_timestamp: float, log_file: str) -> Optional[int]:
+ """
+ Get line number of the specific time stamp
+ """
+ data_list = data.split('\n')
+ first, last = 1, len(data_list)
+
+ while first <= last:
+ middle = (last + first) // 2
+ trycnt = 10
+ while trycnt > 0:
+ middle_timestamp = get_timestamp(data_list[middle - 1], log_file)
+ if middle_timestamp:
+ break
+ # shift the whole first-last segment
+ trycnt -= 1
+ prevmid = middle
+ while prevmid == middle:
+ first -= 1
+ if first < 1:
+ first = 1
+ last -= 1
+ if last < first:
+ last = first
+ prevmid = middle
+ middle = (last + first) // 2
+ if first == last:
+ break
+
+ if not middle_timestamp:
+ return None
+ if middle_timestamp > given_timestamp:
+ last = middle - 1
+ elif middle_timestamp < given_timestamp:
+ first = middle + 1
+ else:
+ break
+
+ return middle
+
+
+def get_pkg_mgr() -> str:
+ """
+ Return the package type ("rpm" or "deb") for the package manager available on the system
+ """
+ pkg_mgr_candidates = {
+ "rpm": "rpm",
+ "dpkg": "deb"
+ }
+ for pkg_mgr_cmd, pkg_type in pkg_mgr_candidates.items():
+ if shutil.which(pkg_mgr_cmd):
+ return pkg_type
+
+ logger.warning("Unknown package manager!")
+ return ""
+
+
+def get_timestamp_from_time_line(time_line: str, stamp_type: str, log_file: str) -> Optional[float]:
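+ """
+ Convert a time string to a timestamp; syslog lines carry no year, so
+ when the parsed time lies in the future, guess the year from the log
+ file's mtime
+ """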
+ timestamp = crmutils.parse_to_timestamp(time_line, quiet=True)
+ if timestamp and stamp_type == "syslog":
+ now = datetime.datetime.now()
+ # got a timestamp in the future
+ if timestamp > now.timestamp():
+ # syslog doesn't have year info, so we need to guess it
+ mtime = os.path.getmtime(log_file)
+ mtime = datetime.datetime.fromtimestamp(mtime)
+ # assume the log is from last year
+ if mtime.year == now.year:
+ time_line += f" {mtime.year-1}"
+ # assume the log is from that year
+ elif mtime.year < now.year:
+ time_line += f" {mtime.year}"
+ # it's impossible that the log is from next year
+ else:
+ return None
+ return crmutils.parse_to_timestamp(time_line, quiet=True)
+ else:
+ return timestamp
+
+
+def get_timestamp(line: str, log_file: str) -> Optional[float]:
+ """
+ Get timestamp for the given line
+ """
+ if not line or not constants.STAMP_TYPE:
+ return None
+
+ stamp_type = constants.STAMP_TYPE
+ if stamp_type == "rfc5424":
+ time_line = line.split()[0]
+ elif stamp_type == "syslog":
+ time_line = ' '.join(line.split()[0:3])
+ elif stamp_type == "legacy":
+ time_line = line.split()[1]
+ else:
+ # unknown stamp type, cannot extract a timestamp
+ return None
+
+ return get_timestamp_from_time_line(time_line, stamp_type, log_file)
+
+
+def head(n: int, indata: str) -> List[str]:
+ return indata.split('\n')[:n]
+
+
+def is_our_log(context: core.Context, logf: str) -> LogType:
+ """
+ Check whether the log overlaps the requested timespan
+
+ Return the corresponding LogType
+ """
+ data = crmutils.read_from_file(logf)
+ if not data:
+ return LogType.EMPTY
+ stamp_type = determin_log_format(data)
+ if not stamp_type:
+ return LogType.IRREGULAR
+ constants.STAMP_TYPE = stamp_type
+
+ first_time = find_first_timestamp(head(constants.CHECK_LOG_LINES, data), logf)
+ last_time = find_first_timestamp(tail(constants.CHECK_LOG_LINES, data), logf)
+ from_time = context.from_time
+ to_time = context.to_time
+
+ if not first_time or not last_time:
+ return LogType.IRREGULAR
+ if from_time > last_time:
+ return LogType.BEFORE_TIMESPAN
+ if from_time >= first_time or to_time >= first_time:
+ return LogType.GOOD
+ return LogType.AFTER_TIMESPAN
+
+
+def create_description_template(context: core.Context) -> None:
+ """
+ Create the description template file, including editing notes and
+ per-node system information
+ """
+ out_string = constants.DECRIPTION_TMPLATE.format(now(), ' '.join(sys.argv[1:]))
+
+ for n in context.node_list:
+ sysinfo_node_f = os.path.join(context.work_dir, n, constants.SYSINFO_F)
+ if os.path.isfile(sysinfo_node_f):
+ out_string += f"[Info from node {n}]:\n"
+ out_string += crmutils.read_from_file(sysinfo_node_f)
+ out_string += "\n\n\n\n"
+
+ description_f = os.path.join(context.work_dir, constants.DESCRIPTION_F)
+ crmutils.str2file(out_string, description_f)
+
+
+def print_logseg(log_file: str, from_time: float, to_time: float) -> str:
+ """
+ Print the log segment specified by the given timestamps
+ """
+ data = crmutils.read_from_file(log_file)
+ if not data:
+ return ""
+
+ from_line = 1 if from_time == 0 else findln_by_timestamp(data, from_time, log_file)
+ to_line = len(data.split('\n')) if to_time == 0 else findln_by_timestamp(data, to_time, log_file)
+
+ if from_line is None or to_line is None:
+ return ""
+
+ logger.debug2("Including segment [%d-%d] from %s", from_line, to_line, log_file)
+ return filter_lines(data, from_line, to_line)
+
+
+def tail(n: int, indata: str) -> List[str]:
+ return indata.split('\n')[-n:]
+
+
+def txt_diff(file1: str, file2: str) -> Tuple[int, str]:
+ cmd = f"diff -bBu {file1} {file2}"
+ rc, out, _ = ShellUtils().get_stdout_stderr(cmd)
+ return rc, out
+
+
+class Sanitizer:
+ """
+ A class containing methods for sanitizing sensitive data in CIB and PE files
+ """
+ DEFAULT_RULE_LIST = ["passw.*"]
+
+ def __init__(self, context: core.Context) -> None:
+ self.file_list_in_workdir = []
+ self.context = context
+ self.cib_data = None
+ self.sensitive_regex_set = set()
+ self.sensitive_value_list_with_raw_option = []
+ self.sensitive_value_list = []
+ self.sensitive_key_list = []
+
+ def prepare(self) -> None:
+ """
+ Prepare the data and files for the sanitization process
+ """
+ self._load_cib_from_work_dir()
+ self._parse_sensitive_set()
+ self._extract_sensitive_value_list()
+
+ if self._include_sensitive_data():
+ if not self.context.sanitize:
+ logger.warning("Some PE/CIB/log files contain possibly sensitive data")
+ logger.warning("Using \"-s\" option can replace sensitive data")
+ return
+ self._get_file_list_in_work_dir()
+ else:
+ self.context.sanitize = False
+
+ def _include_sensitive_data(self) -> bool:
+ """
+ Check whether the collected data contains sensitive values
+ """
+ return bool(self.sensitive_value_list_with_raw_option or self.sensitive_value_list)
+
+ def _get_file_list_in_work_dir(self) -> None:
+ """
+ Collect all files under the work directory into self.file_list_in_workdir
+ """
+ for dirpath, dirnames, filenames in os.walk(self.context.work_dir):
+ for _file in filenames:
+ self.file_list_in_workdir.append(os.path.join(dirpath, _file))
+
+ def _load_cib_from_work_dir(self) -> None:
+ """
+ Load CIB data from the working directory
+ """
+ cib_file_list = glob.glob(f"{self.context.work_dir}/*/{constants.CIB_F}")
+ if not cib_file_list:
+ raise ReportGenericError(f"CIB file {constants.CIB_F} was not collected")
+ data = crmutils.read_from_file(cib_file_list[0])
+ if not data:
+ raise ReportGenericError(f"File {cib_file_list[0]} is empty")
+ self.cib_data = data
+
+ def _parse_sensitive_set(self) -> None:
+ """
+ Parse sensitive regex patterns from the -p option and config.report.sanitize_rule
+ """
+ # from command line option -p
+ patt_set = set(self.context.sensitive_regex_list)
+ # from /etc/crm/crm.conf
+ if config.report.sanitize_rule:
+ patt_set |= set(re.split(r'\s*\|\s*|\s+', config.report.sanitize_rule.strip('|')))
+ if patt_set:
+ self.context.sanitize = True
+ # Not set from -p option and crm.conf, use default
+ else:
+ patt_set = set(Sanitizer.DEFAULT_RULE_LIST)
+ logger.debug2(f"Regex set to match sensitive data: {patt_set}")
+ self.sensitive_regex_set = patt_set
+
+ def _extract_sensitive_value_list(self) -> None:
+ """
+ Extract sensitive value list from cib data
+ """
+ for patt in self.sensitive_regex_set:
+ if ':' in patt:
+ rule, option = patt.split(':')
+ if option == 'raw':
+ self.sensitive_value_list_with_raw_option += self._extract_from_cib(rule)
+ else:
+ logger.warning(f"For sanitize pattern {patt}, option should be \"raw\"")
+ else:
+ self.sensitive_value_list += self._extract_from_cib(patt)
+ self.sensitive_key_list.append(patt.strip('.*?')+'.*?')
+
+ def _extract_from_cib(self, rule: str) -> List[str]:
+ name_patt = rule.strip('?')+'?'
+ value_list = re.findall(f'name="({name_patt})" value="(.*?)"', self.cib_data)
+ return [value[1] for value in value_list]
+
+ def _sub_sensitive_string(self, data: str) -> str:
+ """
+ Do the replacement job
+
+ For the raw sanitize pattern, replace exactly the matched value
+ For the key:value nvpair sanitize pattern, replace the value on lines containing the key
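+
+ e.g. (illustrative) 'name="password" value="qwertyui"' becomes
+ 'name="password" value="******"' when the pattern "passw.*" is active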
+ """
+ result = data
+ replace_raw_n: int = 0
+ replace_n: int = 0
+
+ if self.sensitive_value_list_with_raw_option:
+ patt_str = '|'.join(self.sensitive_value_list_with_raw_option)
+ result, replace_raw_n = re.subn(r'\b({})\b'.format(patt_str), "******", data)
+ if self.sensitive_value_list:
+ key_str = '|'.join(self.sensitive_key_list)
+ patt_str = '|'.join(self.sensitive_value_list)
+ result, replace_n = re.subn(f'({key_str})({patt_str})', '\\1******', result)
+
+ return "" if (replace_raw_n == 0 and replace_n == 0) else result
+
+ def sanitize(self) -> None:
+ """
+ Replace and overwrite files containing sensitive data
+ """
+ if not self.context.sanitize:
+ return
+ for f in self.file_list_in_workdir:
+ data = crmutils.read_from_file(f)
+ if not data:
+ continue
+ replaced_str = self._sub_sensitive_string(data)
+ if replaced_str:
+ logger.debug("Replace sensitive info for %s", f)
+ write_to_file(replaced_str, f)
+
+
+def do_sanitize(context: core.Context) -> None:
+ """
+ Perform sanitization by replacing sensitive information in CIB/PE/other logs data with '*'
+ """
+ inst = Sanitizer(context)
+ inst.prepare()
+ inst.sanitize()
+
+
+class Package:
+ """
+ A class to retrieve package versions and verify packages
+ on various distros
+ """
+ def __init__(self, packages: str) -> None:
+ self.pkg_type = get_pkg_mgr()
+ self.packages = packages
+
+ def pkg_ver_deb(self) -> str:
+ cmd = f"dpkg-query -W -f='${{Package}} ${{Version}}.${{Architecture}}\n' {self.packages}"
+ _, out, _ = ShellUtils().get_stdout_stderr(cmd)
+ return '\n'.join([line for line in out.splitlines() if "no packages found" not in line])
+
+ def pkg_ver_rpm(self) -> str:
+ _, out, _ = ShellUtils().get_stdout_stderr(f"rpm -q {self.packages}")
+ return '\n'.join([line for line in out.splitlines() if "not installed" not in line])
+
+ def version(self) -> str:
+ if not self.pkg_type:
+ return ""
+ return getattr(self, f"pkg_ver_{self.pkg_type}")()
+
+ def verify_deb(self) -> str:
+ cmd = f"dpkg --verify {self.packages}"
+ _, out, _ = ShellUtils().get_stdout_stderr(cmd)
+ return '\n'.join([line for line in out.splitlines() if "not installed" not in line])
+
+ def verify_rpm(self) -> str:
+ cmd = f"rpm --verify {self.packages}"
+ _, out, _ = ShellUtils().get_stdout_stderr(cmd)
+ return '\n'.join([line for line in out.splitlines() if "not installed" not in line])
+
+ def verify(self) -> str:
+ if not self.pkg_type:
+ return ""
+ return getattr(self, f"verify_{self.pkg_type}")()
+
+
+def write_to_file(data: str, tofile: str) -> None:
+ _open = crmutils.get_open_method(tofile)
+ with _open(tofile, 'w') as f:
+ if _open == open:
+ f.write(data)
+ else:
+ f.write(data.encode('utf-8'))
+
+
+def parse_to_timestamp(time: str) -> Optional[float]:
+ """
+ Parses the input time string and converts it to a timestamp
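+
+ Accepts either a relative offset such as "2d" or "-2d" (two days ago),
+ or an absolute time string such as "2019/9/5 12:30", which is delegated
+ to crmutils.parse_to_timestamp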
+ """
+ time_format_mapping = {
+ 'Y': 365, # 1 year is approximately 365 days
+ 'm': 30, # 1 month is approximately 30 days
+ 'd': 1,
+ 'H': 1 / 24, # 1 hour is 1/24 of a day
+ 'M': 1 / 1440 # 1 minute is 1/1440 of a day
+ }
+
+ # Match the input time string to the format
+ match = re.match(r'^-?([1-9][0-9]*)([YmdHM])$', time)
+
+ if not match:
+ res = crmutils.parse_to_timestamp(time, quiet=True)
+ if res:
+ return res
+ logger.error(f"Invalid time string '{time}'")
+ logger.error('Try these formats like: 2pm; "2019/9/5 12:30"; "09-Sep-07 2:00"; "[1-9][0-9]*[YmdHM]"')
+ raise ReportGenericError
+
+ number_str, flag = match.groups()
+ number = int(number_str) * time_format_mapping[flag]
+ timedelta = datetime.timedelta(days=number)
+
+ # Calculate the timestamp
+ timestamp = (datetime.datetime.now() - timedelta).timestamp()
+
+ return timestamp
+
+
+def ts_to_str(timestamp: float) -> str:
+ """
+ Convert timestamp to date string
+ """
+ return dt_to_str(ts_to_dt(timestamp))
+
+
+def ts_to_dt(timestamp: float) -> datetime.datetime:
+ """
+ Convert timestamp to datetime.datetime object, consider utc offset
+ """
+ dt = crmutils.timestamp_to_datetime(timestamp)
+ dt += tz.tzlocal().utcoffset(dt)
+ return dt
+
+
+def dt_to_str(dt: datetime.datetime, form: str = constants.TIME_FORMAT) -> str:
+ return dt.strftime(form)
+
+
+def now(form: str = constants.TIME_FORMAT) -> str:
+ return dt_to_str(datetime.datetime.now(), form=form)
+
+
+def get_cmd_output(cmd: str, timeout: int = None) -> str:
+ """
+ Get the output of a command, include stdout and stderr
+ """
+ out_str = ""
+ _, out, err = ShellUtils().get_stdout_stderr(cmd, timeout=timeout)
+ if out:
+ out_str += f"{out}\n"
+ if err:
+ out_str += f"{err}\n"
+ return out_str
+
+
+def get_timespan_str(context: core.Context) -> str:
+ from_time_str = ts_to_str(context.from_time)
+ to_time_str = ts_to_str(context.to_time)
+ return f"{from_time_str} - {to_time_str}"
+
+
+def print_traceback():
+ traceback.print_exc()
+ sys.stdout.flush()
+
+
+def real_path(target_file: str) -> str:
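+ # strip the first two path components, e.g. (illustrative)
+ # "/opt/workdir/node1/cib.xml" -> "node1/cib.xml"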
+ return '/'.join(target_file.split('/')[3:])
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/rsctest.py b/crmsh/rsctest.py
new file mode 100644
index 0000000..8fa6e2f
--- /dev/null
+++ b/crmsh/rsctest.py
@@ -0,0 +1,478 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+import os
+import sys
+from .utils import rmdir_r, quote, this_node, ext_cmd
+from .xmlutil import get_topmost_rsc, get_op_timeout, get_child_nvset_node, is_ms_or_promotable_clone, is_cloned
+from . import log
+
+
+logger = log.setup_logger(__name__)
+
+#
+# Resource testing suite
+#
+
+
+class RADriver(object):
+ '''
+ Execute operations on resources.
+ '''
+ pfx = {
+ "instance_attributes": "OCF_RESKEY_",
+ "meta_attributes": "OCF_RESKEY_CRM_meta_",
+ }
+ undef = -200
+ unused = -201
+
+ def __init__(self, rsc_node, nodes):
+ from tempfile import mkdtemp
+ self.rscdef_node = rsc_node
+ if rsc_node is not None:
+ self.ra_class = rsc_node.get("class")
+ self.ra_type = rsc_node.get("type")
+ self.ra_provider = rsc_node.get("provider")
+ self.ident = rsc_node.get("id")
+ else:
+ self.ra_class = None
+ self.ra_type = None
+ self.ra_provider = None
+ self.ident = None
+ self.nodes = nodes
+ self.outdir = mkdtemp(prefix="crmsh_out.")
+ self.errdir = mkdtemp(prefix="crmsh_err.")
+ self.ec_l = {}
+ self.ec_ok = self.unused
+ self.ec_stopped = self.unused
+ self.ec_master = self.unused
+ self.last_op = None
+ self.last_rec = {}
+ self.timeout = 20000
+
+ def __del__(self):
+ rmdir_r(self.outdir)
+ rmdir_r(self.errdir)
+
+ def id_str(self):
+ return self.last_op and "%s:%s" % (self.ident, self.last_op) or self.ident
+
+ def err(self, s):
+ logger.error("%s: %s", self.id_str(), s)
+
+ def warn(self, s):
+ logger.warning("%s: %s", self.id_str(), s)
+
+ def info(self, s):
+ logger.info("%s: %s", self.id_str(), s)
+
+ def debug(self, s):
+ logger.debug("%s: %s", self.id_str(), s)
+
+ def is_ms_or_promotable_clone(self):
+ return is_ms_or_promotable_clone(get_topmost_rsc(self.rscdef_node))
+
+ def nvset2env(self, set_n):
+ if set_n is None:
+ return
+ try:
+ pfx = self.pfx[set_n.tag]
+ except KeyError:
+ self.err("unknown attributes set: %s" % set_n.tag)
+ return
+ for nvpair in set_n.iterchildren():
+ if nvpair.tag != "nvpair":
+ continue
+ n = nvpair.get("name")
+ v = nvpair.get("value")
+ self.rscenv["%s%s" % (pfx, n)] = v
+
+ def set_rscenv(self, op):
+ '''
+ Setup the environment. Class specific.
+ '''
+ self.rscenv = {}
+ n = self.rscdef_node
+ self.timeout = get_op_timeout(n, op, "20s")
+ self.rscenv["%stimeout" % self.pfx["meta_attributes"]] = str(self.timeout)
+ if op == "monitor":
+ self.rscenv["%sinterval" % self.pfx["meta_attributes"]] = "10000"
+ if is_cloned(n):
+ # some of the meta attributes for clones/ms are used
+ # by resource agents
+ cn = get_topmost_rsc(n)
+ self.nvset2env(get_child_nvset_node(cn))
+
+ def op_status(self, host):
+ 'Status of the last op.'
+ try:
+ return self.ec_l[host]
+ except KeyError:
+ return self.undef
+
+ def explain_op_status(self, host):
+ stat = self.op_status(host)
+ if stat == -9:
+ return "timed out"
+ elif stat == self.undef:
+ return "unknown reason (the RA couldn't run?)"
+ else:
+ return "exit code %d" % stat
+
+ def is_ok(self, host):
+ 'Was last op successful?'
+ return self.op_status(host) == self.ec_ok
+
+ def is_master(self, host):
+ 'Only if last op was probe/monitor.'
+ return self.op_status(host) == self.ec_master
+
+ def is_stopped(self, host):
+ 'Only if last op was probe/monitor.'
+ return self.op_status(host) == self.ec_stopped
+
+ def show_log(self, host):
+ '''
+ Show stderr and stdout collected from the last operation on the host.
+ '''
+ try:
+ from .crm_pssh import show_output
+ except ImportError:
+ logger.error("Parallax SSH not installed, rsctest can not be executed")
+ return
+
+ logger.error("host %s (%s)\n" %
+ (host, self.explain_op_status(host)))
+ show_output(self.errdir, [host], "stderr")
+ show_output(self.outdir, [host], "stdout")
+
+ def run_on_all(self, op):
+ '''
+ In case of cloned resources, it doesn't make sense to run
+ (certain) operations on just one node. So, we run them
+ everywhere instead.
+ For instance, some clones require quorum.
+ '''
+ return is_cloned(self.rscdef_node) and op in ("start", "stop")
+
+ def exec_cmd(self, op):
+ '''defined in subclasses'''
+ pass
+
+ def runop(self, op, nodes=None, local_only=False):
+ '''
+ Execute an operation.
+ '''
+ if not nodes or self.run_on_all(op):
+ nodes = self.nodes
+ self.last_op = op
+ self.set_rscenv(op)
+ real_op = (op == "probe" and "monitor" or op)
+ cmd = self.exec_cmd(real_op)
+ logger.debug("running %s on %s", real_op, nodes)
+ for attr in self.rscenv:
+ # shell doesn't allow "-" in var names
+ envvar = attr.replace("-", "_")
+ cmd = "%s=%s %s" % (envvar, quote(self.rscenv[attr]), cmd)
+ if local_only:
+ self.ec_l[this_node()] = ext_cmd(cmd)
+ else:
+ try:
+ from .crm_pssh import do_pssh_cmd
+ except ImportError:
+ logger.error("Parallax SSH not installed, rsctest can not be executed")
+ return
+
+ results = do_pssh_cmd(cmd, nodes, self.outdir, self.errdir, self.timeout)
+ for i, node in enumerate(nodes):
+ try:
+ host, result = results[i]
+ except IndexError:
+ self.ec_l[node] = self.undef
+ else:
+ assert node == host
+ self.ec_l[node] = result.returncode
+ return
+
+ def stop(self, node):
+ """
+ Make sure resource is stopped on node.
+ """
+ if self.is_ms_or_promotable_clone():
+ self.runop("demote", (node,))
+ self.runop("stop", (node,))
+ ok = self.is_ok(node)
+ if not ok:
+ self.err("resource failed to stop on %s, clean it up!" % node)
+ self.show_log(node)
+ return ok
+
+ def test_resource(self, node):
+ """
+ Perform test of resource on node.
+ """
+ self.runop("start", (node,))
+ if self.is_ms_or_promotable_clone() and self.is_ok(node):
+ self.runop("promote", (node,))
+ return self.is_ok(node)
+
+ def probe(self):
+ """
+ Execute probe (if possible)
+ """
+ self.runop("probe")
+
+ def verify_stopped(self, node):
+ """
+ Make sure resource is stopped on node.
+ """
+ stopped = self.is_stopped(node)
+ if not stopped:
+ if self.is_ok(node):
+ self.warn("resource running at %s" % (node))
+ elif self.is_ms_or_promotable_clone() and self.is_master(node):
+ self.warn("resource is master at %s" % (node))
+ else:
+ self.warn("resource not clean at %s" % (node))
+ self.show_log(node)
+ return stopped
+
+
+class RAOCF(RADriver):
+ '''
+ Execute operations on OCF resources.
+ '''
+ # OCF exit codes
+ OCF_SUCCESS = 0
+ OCF_ERR_GENERIC = 1
+ OCF_ERR_ARGS = 2
+ OCF_ERR_UNIMPLEMENTED = 3
+ OCF_ERR_PERM = 4
+ OCF_ERR_INSTALLED = 5
+ OCF_ERR_CONFIGURED = 6
+ OCF_NOT_RUNNING = 7
+ OCF_RUNNING_MASTER = 8
+ OCF_FAILED_MASTER = 9
+
+ def __init__(self, *args):
+ RADriver.__init__(self, *args)
+ self.ec_ok = self.OCF_SUCCESS
+ self.ec_stopped = self.OCF_NOT_RUNNING
+ self.ec_master = self.OCF_RUNNING_MASTER
+
+ def set_rscenv(self, op):
+ RADriver.set_rscenv(self, op)
+ self.nvset2env(get_child_nvset_node(self.rscdef_node, "instance_attributes"))
+ self.rscenv["OCF_RESOURCE_INSTANCE"] = self.ident
+ self.rscenv["OCF_ROOT"] = os.environ["OCF_ROOT"]
+
+ def exec_cmd(self, op):
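+ # e.g. "$OCF_ROOT/resource.d/heartbeat/IPaddr2 start"
+ # (OCF_ROOT is typically /usr/lib/ocf)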
+ cmd = "%s/resource.d/%s/%s %s" % \
+ (os.environ["OCF_ROOT"], self.ra_provider, self.ra_type, op)
+ return cmd
+
+
+class RALSB(RADriver):
+ '''
+ Execute operations on LSB resources (init scripts).
+ '''
+
+ # LSB exit codes
+ LSB_OK = 0
+ LSB_ERR_GENERIC = 1
+ LSB_ERR_ARGS = 2
+ LSB_ERR_UNIMPLEMENTED = 3
+ LSB_ERR_PERM = 4
+ LSB_ERR_INSTALLED = 5
+ LSB_ERR_CONFIGURED = 6
+ LSB_NOT_RUNNING = 7
+ LSB_STATUS_DEAD_PID = 1
+ LSB_STATUS_DEAD_LOCK = 2
+ LSB_STATUS_NOT_RUNNING = 3
+ LSB_STATUS_UNKNOWN = 4
+
+ def __init__(self, *args):
+ RADriver.__init__(self, *args)
+ self.ec_ok = self.LSB_OK
+ self.ec_stopped = self.LSB_STATUS_NOT_RUNNING
+ self.ec_master = self.unused
+
+ def set_rscenv(self, op):
+ RADriver.set_rscenv(self, op)
+
+ def exec_cmd(self, op):
+ if self.ra_type.startswith("/"):
+ prog = self.ra_type
+ else:
+ prog = "/etc/init.d/%s" % self.ra_type
+ cmd = "%s %s" % (prog, op == "monitor" and "status" or op)
+ return cmd
+
+
+class RASystemd(RADriver):
+ '''
+ Execute operations on systemd resources.
+ '''
+
+ # Error codes are meaningless for systemd...
+ SYSD_OK = 0
+ SYSD_ERR_GENERIC = 1
+ SYSD_NOT_RUNNING = 3
+
+ def __init__(self, *args):
+ RADriver.__init__(self, *args)
+ self.ec_ok = self.SYSD_OK
+ self.ec_stopped = self.SYSD_NOT_RUNNING
+ self.ec_master = self.unused
+
+ def set_rscenv(self, op):
+ RADriver.set_rscenv(self, op)
+
+ def exec_cmd(self, op):
+ op = "status" if op == "monitor" else op
+ cmd = "systemctl %s %s.service" % (op, self.ra_type)
+ return cmd
+
+
+class RAStonith(RADriver):
+ '''
+ Execute operations on Stonith resources.
+ '''
+
+ STONITH_OK = 0
+ STONITH_ERR = 1
+
+ def __init__(self, *args):
+ RADriver.__init__(self, *args)
+ self.ec_ok = self.STONITH_OK
+ self.ec_stopped = self.STONITH_ERR
+
+ def stop(self, node):
+ """
+ Disable for stonith resources.
+ """
+ return True
+
+ def verify_stopped(self, node):
+ """
+ Disable for stonith resources.
+ """
+ return True
+
+ def test_resource(self, node):
+ """
+ Run test for stonith resource
+ """
+ for prefix in ['rhcs/', 'fence_']:
+ if self.ra_type.startswith(prefix):
+ self.err("Cannot test RHCS STONITH resources!")
+ return False
+ return RADriver.test_resource(self, node)
+
+ def set_rscenv(self, op):
+ RADriver.set_rscenv(self, op)
+ for nv in self.rscdef_node.xpath("instance_attributes/nvpair"):
+ self.rscenv[nv.get('name')] = nv.get('value')
+
+ def exec_cmd(self, op):
+ """
+ Probe resource on each node.
+ """
+ return "stonith -t %s -E -S" % (self.ra_type)
+
+
+ra_driver = {
+ "ocf": RAOCF,
+ "lsb": RALSB,
+ "stonith": RAStonith,
+ "systemd": RASystemd
+}
+
+
+def check_test_support(resources):
+ rc = True
+ for r in resources:
+ ra_class = r.get("class")
+ if not ra_class:
+ logger.warning("class attribute not found in %s", r.get('id'))
+ rc = False
+ elif ra_class not in ra_driver:
+ logger.warning("testing of class %s resources not supported", ra_class)
+ rc = False
+ return rc
+
+
+def are_all_stopped(resources, nodes):
+ rc = True
+ logger.info("Probing resources")
+ for r in resources:
+ ra_class = r.get("class")
+ drv = ra_driver[ra_class](r, nodes)
+ drv.probe()
+ for node in nodes:
+ if not drv.verify_stopped(node):
+ rc = False
+ return rc
+
+
+def stop_all(started, node):
+ 'Stop all started resources in reverse order on node.'
+ while started:
+ drv = started.pop()
+ drv.stop(node)
+
+
+def test_resources(resources, nodes, all_nodes):
+ def test_node(node):
+ started = []
+ id_list_str = ' '.join([r.get("id") for r in resources])
+ logger.info(f"Testing on {node}: {id_list_str}")
+ for r in resources:
+ ra_class = r.get("class")
+ drv = ra_driver[ra_class](r, (node,))
+ if drv.test_resource(node):
+ started.append(drv)
+ else:
+ drv.show_log(node)
+ stop_all(started, node)
+ return False
+ stop_all(started, node)
+ return True
+
+ if not check_test_support(resources):
+ return False
+ if not are_all_stopped(resources, all_nodes):
+ logger.error("Stop all resources before testing!\n")
+ return False
+ rc = True
+ for node in nodes:
+ # a failure on any node fails the whole run
+ rc &= test_node(node)
+ return rc
+
+
+def call_resource(rsc, cmd, nodes, local_only):
+ """
+ Calls the given operation on the resource.
+ local_only: Only performs the call locally (don't use SSH).
+ """
+ ra_class = rsc.get("class")
+ if ra_class not in ra_driver:
+ logger.error("Calling '%s' for resource not supported", cmd)
+ return False
+ d = ra_driver[ra_class](rsc, [])
+
+ from . import ra
+ agent = ra.get_ra(rsc)
+ actions = list(agent.actions().keys()) + ['meta-data', 'validate-all']
+
+ if cmd not in actions:
+ logger.error("action '%s' not supported by %s", cmd, agent)
+ return False
+ d.runop(cmd, nodes, local_only=local_only)
+ for node in nodes:
+ ok = d.is_ok(node)
+ if not ok:
+ logger.error("%s failed with rc=%d on %s", cmd, d.op_status(node), node)
+ return all(d.is_ok(node) for node in nodes)
+
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/sbd.py b/crmsh/sbd.py
new file mode 100644
index 0000000..d296fa1
--- /dev/null
+++ b/crmsh/sbd.py
@@ -0,0 +1,633 @@
+import os
+import re
+import shutil
+from . import utils, sh
+from . import bootstrap
+from .bootstrap import SYSCONFIG_SBD, SBD_SYSTEMD_DELAY_START_DIR
+from . import log
+from . import constants
+from . import corosync
+from . import xmlutil
+from .service_manager import ServiceManager
+from .sh import ShellUtils
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+
+
+class SBDTimeout(object):
+ """
+ Consolidate sbd related timeout methods and constants
+ """
+ STONITH_WATCHDOG_TIMEOUT_DEFAULT = -1
+ SBD_WATCHDOG_TIMEOUT_DEFAULT = 5
+ SBD_WATCHDOG_TIMEOUT_DEFAULT_S390 = 15
+ SBD_WATCHDOG_TIMEOUT_DEFAULT_WITH_QDEVICE = 35
+ QDEVICE_SYNC_TIMEOUT_MARGIN = 5
+
+ def __init__(self, context=None):
+ """
+ Init function
+ """
+ self.context = context
+ self.sbd_msgwait = None
+ self.stonith_timeout = None
+ self.sbd_watchdog_timeout = self.SBD_WATCHDOG_TIMEOUT_DEFAULT
+ self.stonith_watchdog_timeout = self.STONITH_WATCHDOG_TIMEOUT_DEFAULT
+ self.sbd_delay_start = None
+ self.two_node_without_qdevice = False
+
+ def initialize_timeout(self):
+ self._set_sbd_watchdog_timeout()
+ if self.context.diskless_sbd:
+ self._adjust_sbd_watchdog_timeout_with_diskless_and_qdevice()
+ else:
+ self._set_sbd_msgwait()
+
+ def _set_sbd_watchdog_timeout(self):
+ """
+ Set sbd_watchdog_timeout from profiles.yml if exists
+ Then adjust it if in s390 environment
+ """
+ if "sbd.watchdog_timeout" in self.context.profiles_dict:
+ self.sbd_watchdog_timeout = int(self.context.profiles_dict["sbd.watchdog_timeout"])
+ if self.context.is_s390 and self.sbd_watchdog_timeout < self.SBD_WATCHDOG_TIMEOUT_DEFAULT_S390:
+ logger.warning("sbd_watchdog_timeout is set to %d for s390, it was %d", self.SBD_WATCHDOG_TIMEOUT_DEFAULT_S390, self.sbd_watchdog_timeout)
+ self.sbd_watchdog_timeout = self.SBD_WATCHDOG_TIMEOUT_DEFAULT_S390
+
+ def _set_sbd_msgwait(self):
+ """
+ Set sbd msgwait from profiles.yml if exists
+ Default is 2 * sbd_watchdog_timeout
+ """
+ sbd_msgwait_default = 2 * self.sbd_watchdog_timeout
+ sbd_msgwait = sbd_msgwait_default
+ if "sbd.msgwait" in self.context.profiles_dict:
+ sbd_msgwait = int(self.context.profiles_dict["sbd.msgwait"])
+ if sbd_msgwait < sbd_msgwait_default:
+ logger.warning("sbd msgwait is set to %d, it was %d", sbd_msgwait_default, sbd_msgwait)
+ sbd_msgwait = sbd_msgwait_default
+ self.sbd_msgwait = sbd_msgwait
+
+ def _adjust_sbd_watchdog_timeout_with_diskless_and_qdevice(self):
+ """
+ When using diskless SBD with Qdevice, adjust value of sbd_watchdog_timeout
+ """
+ # add sbd after qdevice started
+ if utils.is_qdevice_configured() and ServiceManager().service_is_active("corosync-qdevice.service"):
+ qdevice_sync_timeout = utils.get_qdevice_sync_timeout()
+ if self.sbd_watchdog_timeout <= qdevice_sync_timeout:
+ watchdog_timeout_with_qdevice = qdevice_sync_timeout + self.QDEVICE_SYNC_TIMEOUT_MARGIN
+ logger.warning("sbd_watchdog_timeout is set to {} for qdevice, it was {}".format(watchdog_timeout_with_qdevice, self.sbd_watchdog_timeout))
+ self.sbd_watchdog_timeout = watchdog_timeout_with_qdevice
+ # add sbd and qdevice together from beginning
+ elif self.context.qdevice_inst:
+ if self.sbd_watchdog_timeout < self.SBD_WATCHDOG_TIMEOUT_DEFAULT_WITH_QDEVICE:
+ logger.warning("sbd_watchdog_timeout is set to {} for qdevice, it was {}".format(self.SBD_WATCHDOG_TIMEOUT_DEFAULT_WITH_QDEVICE, self.sbd_watchdog_timeout))
+ self.sbd_watchdog_timeout = self.SBD_WATCHDOG_TIMEOUT_DEFAULT_WITH_QDEVICE
+
+ @staticmethod
+ def get_sbd_msgwait(dev):
+ """
+ Get msgwait for sbd device
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("sbd -d {} dump".format(dev))
+ # Format like "Timeout (msgwait) : 30"
+ res = re.search(r"\(msgwait\)\s+:\s+(\d+)", out)
+ if not res:
+ raise ValueError("Cannot get sbd msgwait for {}".format(dev))
+ return int(res.group(1))
+
+ @staticmethod
+ def get_sbd_watchdog_timeout():
+ """
+ Get SBD_WATCHDOG_TIMEOUT from /etc/sysconfig/sbd
+ """
+ res = SBDManager.get_sbd_value_from_config("SBD_WATCHDOG_TIMEOUT")
+ if not res:
+ raise ValueError("Cannot get the value of SBD_WATCHDOG_TIMEOUT")
+ return int(res)
+
+ @staticmethod
+ def get_stonith_watchdog_timeout():
+ """
+ For non-bootstrap case, get stonith-watchdog-timeout value from cluster property
+ """
+ default = SBDTimeout.STONITH_WATCHDOG_TIMEOUT_DEFAULT
+ if not ServiceManager().service_is_active("pacemaker.service"):
+ return default
+ value = utils.get_property("stonith-watchdog-timeout")
+ return int(value.strip('s')) if value else default
+
+ def _load_configurations(self):
+ """
+ Load necessary configurations for both disk-based/disk-less sbd
+ """
+ self.two_node_without_qdevice = utils.is_2node_cluster_without_qdevice()
+
+ dev_list = SBDManager.get_sbd_device_from_config()
+ if dev_list: # disk-based
+ self.disk_based = True
+ self.msgwait = SBDTimeout.get_sbd_msgwait(dev_list[0])
+ self.pcmk_delay_max = utils.get_pcmk_delay_max(self.two_node_without_qdevice)
+ else: # disk-less
+ self.disk_based = False
+ self.sbd_watchdog_timeout = SBDTimeout.get_sbd_watchdog_timeout()
+ self.stonith_watchdog_timeout = SBDTimeout.get_stonith_watchdog_timeout()
+ self.sbd_delay_start_value_expected = self.get_sbd_delay_start_expected() if utils.detect_virt() else "no"
+ self.sbd_delay_start_value_from_config = SBDManager.get_sbd_value_from_config("SBD_DELAY_START")
+
+ logger.debug("Inspect SBDTimeout: %s", vars(self))
+
+ def get_stonith_timeout_expected(self):
+ """
+ Get stonith-timeout value for sbd cases, formulas are:
+
+ value_from_sbd = 1.2 * (pcmk_delay_max + msgwait) # for disk-based sbd
+ value_from_sbd = 1.2 * max (stonith_watchdog_timeout, 2*SBD_WATCHDOG_TIMEOUT) # for disk-less sbd
+
+ stonith_timeout = max(value_from_sbd, constants.STONITH_TIMEOUT_DEFAULT) + token + consensus
+ """
+ if self.disk_based:
+ value_from_sbd = int(1.2*(self.pcmk_delay_max + self.msgwait))
+ else:
+ value_from_sbd = int(1.2*max(self.stonith_watchdog_timeout, 2*self.sbd_watchdog_timeout))
+
+ value = max(value_from_sbd, constants.STONITH_TIMEOUT_DEFAULT) + corosync.token_and_consensus_timeout()
+ logger.debug("Result of SBDTimeout.get_stonith_timeout_expected %d", value)
+ return value
+
+ @classmethod
+ def get_stonith_timeout(cls):
+ cls_inst = cls()
+ cls_inst._load_configurations()
+ return cls_inst.get_stonith_timeout_expected()
+
+ def get_sbd_delay_start_expected(self):
+ """
+ Get the value for SBD_DELAY_START, formulas are:
+
+ SBD_DELAY_START = (token + consensus + pcmk_delay_max + msgwait) # for disk-based sbd
+ SBD_DELAY_START = (token + consensus + 2*SBD_WATCHDOG_TIMEOUT) # for disk-less sbd
+ """
+ token_and_consensus_timeout = corosync.token_and_consensus_timeout()
+ if self.disk_based:
+ value = token_and_consensus_timeout + self.pcmk_delay_max + self.msgwait
+ else:
+ value = token_and_consensus_timeout + 2*self.sbd_watchdog_timeout
+ return value
+
+ @staticmethod
+ def get_sbd_delay_start_sec_from_sysconfig():
+ """
+ Get suitable systemd start timeout for sbd.service
+ """
+ # TODO 5ms, 5us, 5s, 5m, 5h are also valid for sbd sysconfig
+ value = SBDManager.get_sbd_value_from_config("SBD_DELAY_START")
+ if utils.is_boolean_true(value):
+ return 2*SBDTimeout.get_sbd_watchdog_timeout()
+ return int(value)
+
+ @staticmethod
+ def is_sbd_delay_start():
+ """
+ Check if SBD_DELAY_START is not no or not set
+ """
+ res = SBDManager.get_sbd_value_from_config("SBD_DELAY_START")
+ return res and res != "no"
+
+ def adjust_systemd_start_timeout(self):
+ """
+ Adjust start timeout for sbd when set SBD_DELAY_START
+ """
+ sbd_delay_start_value = SBDManager.get_sbd_value_from_config("SBD_DELAY_START")
+ if sbd_delay_start_value == "no":
+ return
+
+ cmd = "systemctl show -p TimeoutStartUSec sbd --value"
+ out = sh.cluster_shell().get_stdout_or_raise_error(cmd)
+ start_timeout = utils.get_systemd_timeout_start_in_sec(out)
+ if start_timeout > int(sbd_delay_start_value):
+ return
+
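+ # write a systemd drop-in raising TimeoutSec to 1.2 * SBD_DELAY_START,
+ # e.g. (illustrative) SBD_DELAY_START=71 yields TimeoutSec=85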
+ utils.mkdirp(SBD_SYSTEMD_DELAY_START_DIR)
+ sbd_delay_start_file = "{}/sbd_delay_start.conf".format(SBD_SYSTEMD_DELAY_START_DIR)
+ utils.str2file("[Service]\nTimeoutSec={}".format(int(1.2*int(sbd_delay_start_value))), sbd_delay_start_file)
+ bootstrap.sync_file(SBD_SYSTEMD_DELAY_START_DIR)
+ utils.cluster_run_cmd("systemctl daemon-reload")
+
+ def adjust_stonith_timeout(self):
+ """
+ Adjust stonith-timeout property
+ """
+ utils.set_property("stonith-timeout", self.get_stonith_timeout_expected(), conditional=True)
+
+ def adjust_sbd_delay_start(self):
+ """
+ Adjust SBD_DELAY_START in /etc/sysconfig/sbd
+ """
+ expected_value = str(self.sbd_delay_start_value_expected)
+ config_value = self.sbd_delay_start_value_from_config
+ if expected_value == config_value:
+ return
+ if expected_value == "no" \
+ or (not re.search(r'\d+', config_value)) \
+ or (int(expected_value) > int(config_value)):
+ SBDManager.update_configuration({"SBD_DELAY_START": expected_value})
+
+ @classmethod
+ def adjust_sbd_timeout_related_cluster_configuration(cls):
+ """
+ Adjust sbd timeout related configurations
+ """
+ cls_inst = cls()
+ cls_inst._load_configurations()
+
+ message = "Adjusting sbd related timeout values"
+ with logger_utils.status_long(message):
+ cls_inst.adjust_sbd_delay_start()
+ cls_inst.adjust_stonith_timeout()
+ cls_inst.adjust_systemd_start_timeout()
+
+
+class SBDManager(object):
+ """
+ Class to manage sbd configuration and services
+ """
+ SYSCONFIG_SBD_TEMPLATE = "/usr/share/fillup-templates/sysconfig.sbd"
+ SBD_STATUS_DESCRIPTION = """Configure SBD:
+ If you have shared storage, for example a SAN or iSCSI target,
+ you can use it to avoid split-brain scenarios by configuring SBD.
+ This requires a 1 MB partition, accessible to all nodes in the
+ cluster. The device path must be persistent and consistent
+ across all nodes in the cluster, so /dev/disk/by-id/* devices
+ are a good choice. Note that all data on the partition you
+ specify here will be destroyed.
+"""
+ SBD_WARNING = "Not configuring SBD - STONITH will be disabled."
+ DISKLESS_SBD_WARNING = "Diskless SBD requires a cluster with three or more nodes. To use diskless SBD in a 2-node cluster, combine it with QDevice."
+ PARSE_RE = "[; ]"
+ DISKLESS_CRM_CMD = "crm configure property stonith-enabled=true stonith-watchdog-timeout={} stonith-timeout={}"
+ SBD_RA = "stonith:external/sbd"
+ SBD_RA_ID = "stonith-sbd"
+
+ def __init__(self, context):
+ """
+ Init function
+
+ sbd_devices is provided by '-s' option on init process
+ diskless_sbd is provided by '-S' option on init process
+ """
+ self.sbd_devices_input = context.sbd_devices
+ self.diskless_sbd = context.diskless_sbd
+ self._sbd_devices = None
+ self._watchdog_inst = None
+ self._context = context
+ self._delay_start = False
+ self.timeout_inst = None
+ self.no_overwrite_map = {}
+ self.no_update_config = False
+
+ @staticmethod
+ def _get_device_uuid(dev, node=None):
+ """
+ Get UUID for specific device and node
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("sbd -d {} dump".format(dev), node)
+ res = re.search(r"UUID\s*:\s*(.*)\n", out)
+ if not res:
+ raise ValueError("Cannot find sbd device UUID for {}".format(dev))
+ return res.group(1)
+
+ def _compare_device_uuid(self, dev, node_list):
+ """
+ Compare local sbd device UUID with other node's sbd device UUID
+ """
+ if not node_list:
+ return
+ local_uuid = self._get_device_uuid(dev)
+ for node in node_list:
+ remote_uuid = self._get_device_uuid(dev, node)
+ if local_uuid != remote_uuid:
+ raise ValueError("Device {} doesn't have the same UUID with {}".format(dev, node))
+
+ def _verify_sbd_device(self, dev_list, compare_node_list=[]):
+ """
+ Verify sbd device
+ """
+ if len(dev_list) > 3:
+ raise ValueError("Maximum number of SBD device is 3")
+ for dev in dev_list:
+ if not utils.is_block_device(dev):
+ raise ValueError("{} doesn't look like a block device".format(dev))
+ self._compare_device_uuid(dev, compare_node_list)
+
+ def _no_overwrite_check(self, dev):
+ """
+ Check if device already initialized and if need to overwrite
+ """
+ return SBDManager.has_sbd_device_already_initialized(dev) and not bootstrap.confirm("SBD is already configured to use {} - overwrite?".format(dev))
+
+ def _get_sbd_device_interactive(self):
+ """
+ Get sbd device on interactive mode
+ """
+ if self._context.yes_to_all:
+ logger.warning(self.SBD_WARNING)
+ return
+
+ logger.info(self.SBD_STATUS_DESCRIPTION)
+
+ if not bootstrap.confirm("Do you wish to use SBD?"):
+ logger.warning(self.SBD_WARNING)
+ return
+
+ configured_dev_list = self._get_sbd_device_from_config()
+ for dev in configured_dev_list:
+ self.no_overwrite_map[dev] = self._no_overwrite_check(dev)
+ if self.no_overwrite_map and all(self.no_overwrite_map.values()):
+ self.no_update_config = True
+ return configured_dev_list
+
+ dev_list = []
+ dev_looks_sane = False
+ while not dev_looks_sane:
+ dev = bootstrap.prompt_for_string('Path to storage device (e.g. /dev/disk/by-id/...), or "none" for diskless sbd; use ";" as separator for multiple devices', r'none|\/.*')
+ if dev == "none":
+ self.diskless_sbd = True
+ return
+
+ dev_list = utils.re_split_string(self.PARSE_RE, dev)
+ try:
+ self._verify_sbd_device(dev_list)
+ except ValueError as err_msg:
+ logger.error(str(err_msg))
+ continue
+
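+ # skip re-initialization for devices the user refuses to overwrite;
+ # if every device up to the last one is kept, reuse the list as-is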
+ for dev in dev_list:
+ if dev not in self.no_overwrite_map:
+ self.no_overwrite_map[dev] = self._no_overwrite_check(dev)
+ if self.no_overwrite_map[dev]:
+ if dev == dev_list[-1]:
+ return dev_list
+ continue
+ logger.warning("All data on {} will be destroyed!".format(dev))
+ if bootstrap.confirm('Are you sure you wish to use this device?'):
+ dev_looks_sane = True
+ else:
+ dev_looks_sane = False
+ break
+
+ return dev_list
+
+ def _get_sbd_device(self):
+ """
+ Get sbd device from options or interactive mode
+ """
+ dev_list = []
+ if self.sbd_devices_input:
+ dev_list = self.sbd_devices_input
+ self._verify_sbd_device(dev_list)
+ for dev in dev_list:
+ self.no_overwrite_map[dev] = self._no_overwrite_check(dev)
+ if all(self.no_overwrite_map.values()) and dev_list == self._get_sbd_device_from_config():
+ self.no_update_config = True
+ elif not self.diskless_sbd:
+ dev_list = self._get_sbd_device_interactive()
+ self._sbd_devices = dev_list
+
+ def _initialize_sbd(self):
+ """
+ Initialize SBD parameters according to profiles.yml, or the crmsh-defined defaults as the last resort.
+ This covers both disk-based-sbd, and diskless-sbd scenarios.
+ For diskless-sbd, set sbd_watchdog_timeout then return;
+ For disk-based-sbd, also calculate the msgwait value, then initialize the SBD device.
+ """
+ msg = ""
+ if self.diskless_sbd:
+ msg = "Configuring diskless SBD"
+ elif not all(self.no_overwrite_map.values()):
+ msg = "Initializing SBD"
+ if msg:
+ logger.info(msg)
+ self.timeout_inst = SBDTimeout(self._context)
+ self.timeout_inst.initialize_timeout()
+ if self.diskless_sbd:
+ return
+
+ opt = "-4 {} -1 {}".format(self.timeout_inst.sbd_msgwait, self.timeout_inst.sbd_watchdog_timeout)
+
+ for dev in self._sbd_devices:
+ if dev in self.no_overwrite_map and self.no_overwrite_map[dev]:
+ continue
+ rc, _, err = bootstrap.invoke("sbd {} -d {} create".format(opt, dev))
+ if not rc:
+ utils.fatal("Failed to initialize SBD device {}: {}".format(dev, err))
+
+ def _update_sbd_configuration(self):
+ """
+ Update /etc/sysconfig/sbd
+ """
+ if self.no_update_config:
+ bootstrap.sync_file(SYSCONFIG_SBD)
+ return
+
+ utils.copy_local_file(self.SYSCONFIG_SBD_TEMPLATE, SYSCONFIG_SBD)
+ sbd_config_dict = {
+ "SBD_WATCHDOG_DEV": self._watchdog_inst.watchdog_device_name,
+ "SBD_WATCHDOG_TIMEOUT": str(self.timeout_inst.sbd_watchdog_timeout)
+ }
+ if self._sbd_devices:
+ sbd_config_dict["SBD_DEVICE"] = ';'.join(self._sbd_devices)
+ utils.sysconfig_set(SYSCONFIG_SBD, **sbd_config_dict)
+ bootstrap.sync_file(SYSCONFIG_SBD)
+
+ def _get_sbd_device_from_config(self):
+ """
+ Gets currently configured SBD device, i.e. what's in /etc/sysconfig/sbd
+ """
+ res = SBDManager.get_sbd_value_from_config("SBD_DEVICE")
+ if res:
+ return utils.re_split_string(self.PARSE_RE, res)
+ else:
+ return []
+
+ def _restart_cluster_and_configure_sbd_ra(self):
+ """
+ Try to configure the sbd resource, restarting the cluster if needed
+ """
+ if not xmlutil.CrmMonXmlParser().is_any_resource_running():
+ logger.info("Restarting cluster service")
+ utils.cluster_run_cmd("crm cluster restart")
+ bootstrap.wait_for_cluster()
+ self.configure_sbd_resource_and_properties()
+ else:
+ logger.warning("To start sbd.service, need to restart cluster service manually on each node")
+ if self.diskless_sbd:
+ cmd = self.DISKLESS_CRM_CMD.format(self.timeout_inst.stonith_watchdog_timeout, SBDTimeout.get_stonith_timeout())
+ logger.warning("Then run \"{}\" on any node".format(cmd))
+ else:
+ self.configure_sbd_resource_and_properties()
+
+ def _enable_sbd_service(self):
+ """
+ Try to enable sbd service
+ """
+ if self._context.cluster_is_running:
+ # in sbd stage, enable sbd.service on cluster wide
+ utils.cluster_run_cmd("systemctl enable sbd.service")
+ self._restart_cluster_and_configure_sbd_ra()
+ else:
+ # in init process
+ bootstrap.invoke("systemctl enable sbd.service")
+
+ def _warn_diskless_sbd(self, peer=None):
+ """
+ Give warning when configuring diskless sbd
+ """
+ # When in sbd stage or join process
+ if (self.diskless_sbd and self._context.cluster_is_running) or peer:
+ vote_dict = utils.get_quorum_votes_dict(peer)
+ expected_vote = int(vote_dict['Expected'])
+ if (expected_vote < 2 and peer) or (expected_vote < 3 and not peer):
+ logger.warning(self.DISKLESS_SBD_WARNING)
+ # When in init process
+ elif self.diskless_sbd:
+ logger.warning(self.DISKLESS_SBD_WARNING)
+
+ def sbd_init(self):
+ """
+ Function sbd_init includes these steps:
+ 1. Get sbd device from options or interactive mode
+ 2. Initialize sbd device
+ 3. Write config file /etc/sysconfig/sbd
+ """
+ from .watchdog import Watchdog
+
+ if not utils.package_is_installed("sbd"):
+ return
+ self._watchdog_inst = Watchdog(_input=self._context.watchdog)
+ self._watchdog_inst.init_watchdog()
+ self._get_sbd_device()
+ if not self._sbd_devices and not self.diskless_sbd:
+ bootstrap.invoke("systemctl disable sbd.service")
+ return
+ self._warn_diskless_sbd()
+ self._initialize_sbd()
+ self._update_sbd_configuration()
+ self._enable_sbd_service()
+
+ def configure_sbd_resource_and_properties(self):
+ """
+ Configure stonith-sbd resource and related properties
+ """
+ if not utils.package_is_installed("sbd") or \
+ not ServiceManager().service_is_enabled("sbd.service") or \
+ xmlutil.CrmMonXmlParser().is_resource_configured(self.SBD_RA):
+ return
+ shell = sh.cluster_shell()
+
+ # disk-based sbd
+ if self._get_sbd_device_from_config():
+ shell.get_stdout_or_raise_error("crm configure primitive {} {}".format(self.SBD_RA_ID, self.SBD_RA))
+ utils.set_property("stonith-enabled", "true")
+ # disk-less sbd
+ else:
+ if self.timeout_inst is None:
+ self.timeout_inst = SBDTimeout(self._context)
+ self.timeout_inst.initialize_timeout()
+ cmd = self.DISKLESS_CRM_CMD.format(self.timeout_inst.stonith_watchdog_timeout, constants.STONITH_TIMEOUT_DEFAULT)
+ shell.get_stdout_or_raise_error(cmd)
+
+ # in sbd stage
+ if self._context.cluster_is_running:
+ bootstrap.adjust_properties()
+
+ def join_sbd(self, remote_user, peer_host):
+ """
+ Function join_sbd running on join process only
+ On joining process, check whether peer node has enabled sbd.service
+ If so, check prerequisites of SBD and verify sbd device on join node
+ """
+ from .watchdog import Watchdog
+
+ if not utils.package_is_installed("sbd"):
+ return
+ if not os.path.exists(SYSCONFIG_SBD) or not ServiceManager().service_is_enabled("sbd.service", peer_host):
+ bootstrap.invoke("systemctl disable sbd.service")
+ return
+ self._watchdog_inst = Watchdog(remote_user=remote_user, peer_host=peer_host)
+ self._watchdog_inst.join_watchdog()
+ dev_list = self._get_sbd_device_from_config()
+ if dev_list:
+ self._verify_sbd_device(dev_list, [peer_host])
+ else:
+ self._warn_diskless_sbd(peer_host)
+ logger.info("Got {}SBD configuration".format("" if dev_list else "diskless "))
+ bootstrap.invoke("systemctl enable sbd.service")
+
+ @classmethod
+ def verify_sbd_device(cls):
+ """
+ This classmethod is for verifying sbd device on a running cluster
+ Raise ValueError for exceptions
+ """
+ inst = cls(bootstrap.Context())
+ dev_list = inst._get_sbd_device_from_config()
+ if not dev_list:
+ raise ValueError("No sbd device configured")
+ inst._verify_sbd_device(dev_list, utils.list_cluster_nodes_except_me())
+
+ @classmethod
+ def get_sbd_device_from_config(cls):
+ """
+ Get sbd device list from config
+ """
+ inst = cls(bootstrap.Context())
+ return inst._get_sbd_device_from_config()
+
+ @classmethod
+ def is_using_diskless_sbd(cls):
+ """
+ Check if using diskless SBD
+ """
+ inst = cls(bootstrap.Context())
+ dev_list = inst._get_sbd_device_from_config()
+ if not dev_list and ServiceManager().service_is_active("sbd.service"):
+ return True
+ return False
+
+ @staticmethod
+ def update_configuration(sbd_config_dict):
+ """
+ Update and sync sbd configuration
+ """
+ utils.sysconfig_set(SYSCONFIG_SBD, **sbd_config_dict)
+ bootstrap.sync_file(SYSCONFIG_SBD)
+
+ @staticmethod
+ def get_sbd_value_from_config(key):
+ """
+ Get value from /etc/sysconfig/sbd
+ """
+ conf = utils.parse_sysconfig(SYSCONFIG_SBD)
+ res = conf.get(key)
+ return res
+
+ @staticmethod
+ def has_sbd_device_already_initialized(dev):
+ """
+ Check if sbd device already initialized
+ """
+ cmd = "sbd -d {} dump".format(dev)
+ rc, _, _ = ShellUtils().get_stdout_stderr(cmd)
+ return rc == 0
+
+
+def clean_up_existing_sbd_resource():
+ if xmlutil.CrmMonXmlParser().is_resource_configured(SBDManager.SBD_RA):
+ sbd_id_list = xmlutil.CrmMonXmlParser().get_resource_id_list_via_type(SBDManager.SBD_RA)
+ if xmlutil.CrmMonXmlParser().is_resource_started(SBDManager.SBD_RA):
+ for sbd_id in sbd_id_list:
+ utils.ext_cmd("crm resource stop {}".format(sbd_id))
+ utils.ext_cmd("crm configure delete {}".format(' '.join(sbd_id_list)))
diff --git a/crmsh/schema.py b/crmsh/schema.py
new file mode 100644
index 0000000..a53c169
--- /dev/null
+++ b/crmsh/schema.py
@@ -0,0 +1,154 @@
+# Copyright (C) 2012 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+import re
+from . import config
+from .pacemaker import CrmSchema, PacemakerError
+from . import log
+
+
+logger = log.setup_logger(__name__)
+
+
+def is_supported(name):
+ """
+ Check if the given name is a supported schema name.
+ A short form is also accepted, where the prefix
+ pacemaker- is implied.
+
+ Revision: the pacemaker schema version now
+ changes too often for a strict check to make sense.
+ Let's just check for schemas we know we don't
+ support.
+ """
+ name = re.match(r'pacemaker-(\d+\.\d+)$', name)
+ if name:
+ return float(name.group(1)) > 0.9
+ return True
+
+
+def get_attrs(schema, name):
+ return {
+ 'a': schema.get_elem_attrs(name, 'a'), # all
+ 'r': schema.get_elem_attrs(name, 'r'), # required
+ 'o': schema.get_elem_attrs(name, 'o'), # optional
+ }
+
+
+def get_subs(schema, name):
+ return {
+ 'a': schema.get_sub_elems(name, 'a'), # all
+ 'r': schema.get_sub_elems(name, 'r'), # required
+ 'o': schema.get_sub_elems(name, 'o'), # optional
+ }
+
+
+def get_attr_details_d(schema, name):
+ # some attributes' names don't repeat, can do a hash
+ # (op)
+ d = {}
+ for attr_obj in schema.get_elem_attr_objs(name):
+ attr_name = schema.get_obj_name(attr_obj)
+ d[attr_name] = {
+ 't': schema.get_attr_type(attr_obj), # type
+ 'v': schema.get_attr_values(attr_obj), # values
+ 'd': schema.get_attr_default(attr_obj), # default
+ }
+ return d
+
+
+def get_attr_details_l(schema, name):
+ # some attributes' names repeat, need a list
+ # (date_expression)
+ l = []
+ for attr_obj in schema.get_elem_attr_objs(name):
+ l.append({
+ 'n': schema.get_obj_name(attr_obj), # name
+ 't': schema.get_attr_type(attr_obj), # type
+ 'v': schema.get_attr_values(attr_obj), # values
+ 'd': schema.get_attr_default(attr_obj), # default
+ })
+ return l
+
+
+_cache_funcs = {
+ 'attr': get_attrs,
+ 'sub': get_subs,
+ 'attr_det': get_attr_details_d,
+ 'attr_det_l': get_attr_details_l,
+}
+
+
+_crm_schema = None
+_store = {}
+
+
+def reset():
+ global _store
+ _store = {}
+
+
+def _load_schema(cib):
+ return CrmSchema(cib, config.path.crm_dtd_dir)
+
+
+def init_schema(cib):
+ global _crm_schema
+ try:
+ _crm_schema = _load_schema(cib)
+ except PacemakerError as msg:
+ logger.error(msg)
+ reset()
+
+
+def test_schema(cib):
+ try:
+ crm_schema = _load_schema(cib)
+ except PacemakerError as msg:
+ logger.error(msg)
+ return None
+ return crm_schema.validate_name
+
+
+def validate_name():
+ if _crm_schema is None:
+ return 'pacemaker-2.0'
+ return _crm_schema.validate_name
+
+
+def get(t, name, subset=None):
+ if _crm_schema is None:
+ return []
+ if t not in _store:
+ _store[t] = {}
+ if name not in _store[t]:
+ _store[t][name] = _cache_funcs[t](_crm_schema, name)
+ if subset:
+ return _store[t][name][subset]
+ else:
+ return _store[t][name]
+
+
+def rng_attr_values(el_name, attr_name):
+ ""
+ try:
+ return get('attr_det', el_name)[attr_name]['v']
+ except (KeyError, TypeError):
+ return []
+
+
+def rng_attr_values_l(el_name, attr_name):
+ ""
+ l = get('attr_det_l', el_name)
+ l2 = []
+ for el in l:
+ if el['n'] == attr_name:
+ l2 += el['v']
+ return l2
+
+
+def rng_xpath(xpath, namespaces=None):
+ if _crm_schema is None:
+ return []
+ return _crm_schema.rng_xpath(xpath, namespaces=namespaces)
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/scripts.py b/crmsh/scripts.py
new file mode 100644
index 0000000..7add615
--- /dev/null
+++ b/crmsh/scripts.py
@@ -0,0 +1,2169 @@
+# Copyright (C) 2015 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import os
+import re
+import subprocess
+import getpass
+import time
+import shutil
+import socket
+import random
+from copy import deepcopy
+from glob import glob
+from lxml import etree
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+from . import config
+from . import handles
+from . import options
+from . import userdir
+from . import utils
+from . import log
+from .prun import prun
+from .sh import ShellUtils
+import crmsh.parallax
+
+
+logger = log.setup_logger(__name__)
+
+
+_script_cache = None
+_script_version = 2.2
+_strict_handles = False
+
+_action_shortdescs = {
+ 'cib': 'Configure cluster resources',
+ 'install': 'Install packages',
+ 'service': 'Manage system services',
+ 'call': 'Run command on nodes',
+ 'copy': 'Install file on nodes',
+ 'crm': 'Run crm command',
+ 'collect': 'Collect data from nodes',
+ 'verify': 'Verify collected data',
+ 'apply': 'Apply changes to nodes',
+ 'apply_local': 'Apply changes to cluster'
+}
+
+
+class Text(object):
+ """
+ Idea: Replace all fields that may contain
+ references to data with Text objects, that
+ lazily resolve when asked to.
+ Context needed is the script in which this
+ Text resolves. What we do is that we install
+ the parameter values in the script, so we can
+ get it from here.
+
+ This can also then be responsible for the
+ various kinds of output cleanup/formatting
+ (desc, cib, etc)
+ """
+ DESC = 1
+ CIB = 2
+ SHORTDESC = 3
+
+ @staticmethod
+ def shortdesc(script, text):
+ return Text(script, text, kind=Text.SHORTDESC)
+
+ @staticmethod
+ def desc(script, text):
+ return Text(script, text, kind=Text.DESC)
+
+ @staticmethod
+ def cib(script, text):
+ return Text(script, text, kind=Text.CIB)
+
+ @staticmethod
+ def isa(obj):
+ return isinstance(obj, str) or isinstance(obj, Text)
+
+ def __init__(self, script, text, kind=None):
+ self.script = script
+ if isinstance(text, Text):
+ self.text = text.text
+ else:
+ self.text = text
+ self._kind = kind
+
+ def _parse(self):
+ val = self.text
+ if val in (True, False):
+ return "true" if val else "false"
+ if not isinstance(val, str):
+ return str(val)
+ return handles.parse(val, self.script.get('__values__', {})).strip()
+
+ def __repr__(self):
+ return repr(self.text)
+
+ def __str__(self):
+ if self._kind == self.DESC:
+ return format_desc(self._parse())
+ elif self._kind == self.SHORTDESC:
+ return self._parse()
+ elif self._kind == self.CIB:
+ return format_cib(self._parse())
+ return self._parse()
+
+ def __eq__(self, obj):
+ return str(self) == str(obj)
+
+
+class WhenExpr(object):
+ def __init__(self, script, prog):
+ self.script = script
+ self.prog = prog
+
+ def __repr__(self):
+ return repr(self.prog)
+
+ def __str__(self):
+ lenv = self.script.get('__values__', {})
+ inp = handles.parse(self.prog, lenv).strip()
+ try:
+ from .minieval import minieval, InvalidExpression
+ return str(minieval(inp, lenv)).lower()
+ except InvalidExpression as err:
+ raise ValueError(str(err))
+
+
+def _strip(desc):
+ if desc is None:
+ return None
+ return desc.strip()
+
+
+def format_desc(desc):
+ import textwrap
+ return '\n\n'.join([textwrap.fill(para) for para in desc.split('\n\n') if para.strip()])
+
+
+def format_cib(text):
+ text = re.sub(r'[ ]+', ' ', text)
+ text = re.sub(r'\n[ \t\f\v]+', '\n\t', text)
+ while True:
+ i = text.find('\n\t\n')
+ if i < 0:
+ break
+ text = text[:i] + text[i+2:]
+ return text
+
+
+def space_cib(text):
+ """
+ After merging CIB commands, space separate lines out
+ """
+ return re.sub(r'\n([^\t])', r'\n\n\1', re.sub(r'[\n\r]+', r'\n', text))
+
+
+class Actions(object):
+ """
+ Each method in this class handles a particular action.
+ """
+ @staticmethod
+ def parse(script, action):
+ """
+ action: action data (dict)
+ params: flat list of parameter values
+ values: processed list of parameter values (for handles.parse)
+
+ Converts {'cib': "primitive..."} into {"name": "cib", "value": "primitive..."}
+ Each action has two values: "value" may be a non-textual object
+ depending on the type of action. "text" is visual context to display
+ to a user (so a cleaned up CIB, or the list of packages to install)
+ """
+ name = action['name']
+ action['value'] = action[name]
+ del action[name]
+ action['text'] = ''
+ value = action['value']
+ if name == 'install':
+ if Text.isa(value):
+ action['value'] = str(value).split()
+ action['text'] = ' '.join(action['value'])
+ # service takes a list of objects with a single key;
+ # mapping service: state
+ # the text field will be converted to lines where
+ # each line is <service> -> <state>
+ elif name == 'service':
+ if Text.isa(value):
+ value = [dict([v.split(':', 1)]) for v in str(value).split()]
+ action['value'] = value
+
+ def arrow(v):
+ return ' -> '.join(list(v.items())[0])
+ action['text'] = '\n'.join([arrow(x) for x in value])
+ elif name == 'cib' or name == 'crm':
+ action['text'] = str(Text.cib(script, value))
+ action['value'] = _remove_empty_lines(action['text'])
+ elif name == 'call':
+ action['value'] = Text(script, value)
+ elif name == 'copy':
+ action['value'] = Text(script, value)
+ action['template'] = _make_boolean(action.get('template', False))
+ action['to'] = Text(script, action.get('to', action['value']))
+ action['text'] = "%s -> %s" % (action['value'], action['to'])
+
+ if 'shortdesc' not in action:
+ action['shortdesc'] = _action_shortdescs.get(name, '')
+ else:
+ action['shortdesc'] = Text.shortdesc(script, action['shortdesc'])
+ if 'longdesc' not in action:
+ action['longdesc'] = ''
+ else:
+ action['longdesc'] = Text.desc(script, action['longdesc'])
+
+ hre = handles.headmatcher
+ ident_re = re.compile(r'([a-z_-][a-z0-9_-]*)$', re.IGNORECASE)
+
+ if 'when' in action:
+ when = action['when']
+ if ident_re.match(when):
+ action['when'] = Text(script, '{{%s}}' % (when))
+ elif when:
+ action['when'] = WhenExpr(script, when)
+ else:
+ del action['when']
+ for k, v in action.items():
+ if isinstance(v, str) and hre.search(v):
+ v = Text(script, v)
+ if Text.isa(v):
+ action[k] = str(v).strip()
+
+ @staticmethod
+ def mergeable(action):
+ return action['name'] in ('cib', 'crm', 'install', 'service')
+
+ @staticmethod
+ def merge(into, new):
+ """
+ Merge neighbour actions.
+ Note: When this is called, all text values
+ should already be "reduced", that is, any
+ variable references already resolved.
+ """
+ if into.get('nodes') != new.get('nodes'):
+ return False
+ if into['name'] in ('cib', 'crm'):
+ into['value'] = '\n'.join([str(into['value']), str(new['value'])])
+ into['text'] = space_cib('\n'.join([str(into['text']), str(new['text'])]))
+ elif into['name'] == 'service':
+ into['value'].extend(new['value'])
+ into['text'] = '\n'.join([str(into['text']), str(new['text'])])
+ elif into['name'] == 'install':
+ into['value'].extend(new['value'])
+ into['text'] = ' '.join([str(into['text']), str(new['text'])])
+ if new['shortdesc']:
+ newd = str(new['shortdesc'])
+ if newd != str(into['shortdesc']):
+ into['shortdesc'] = _strip(newd)
+ if new['longdesc']:
+ newd = str(new['longdesc'])
+ if newd != str(into['longdesc']):
+ into['longdesc'] = newd
+ return True
+
+ @staticmethod
+ def needs_sudo(action):
+ if action['name'] == 'call':
+ return action.get('sudo') or action.get('nodes') != 'local'
+ return action['name'] in ('apply', 'apply_local', 'install', 'service')
+
+ def __init__(self, run, action):
+ self._run = run
+ self._action = action
+ self._value = action['value']
+ if not isinstance(self._value, list):
+ self._value = str(self._value)
+ self._text = str(action['text'])
+ self._nodes = str(action.get('nodes', ''))
+
+ def collect(self):
+ "input: shell command"
+ self._run.run_command(self._nodes or 'all', self._value, True)
+ self._run.record_json()
+
+ def validate(self):
+ "input: shell command"
+ self._run.run_command(None, self._value, True)
+ self._run.validate_json()
+
+ def apply(self):
+ "input: shell command"
+ self._run.run_command(self._nodes or 'all', self._value, True)
+ self._run.record_json()
+
+ def apply_local(self):
+ "input: shell command"
+ self._run.run_command(None, self._value, True)
+ self._run.record_json()
+
+ def report(self):
+ "input: shell command"
+ self._run.run_command(None, self._value, False)
+ self._run.report_result()
+
+ def call(self):
+ """
+ input: shell command / script
+
+ TODO: actually allow script here
+ """
+ self._run.call(self._nodes, self._value)
+
+ def copy(self):
+ """
+ copy: <from>
+ to: <path>
+ template: true|false
+
+ TODO: FIXME: Verify that it works...
+ TODO: FIXME: Error handling
+ """
+ if not os.path.exists(self._value):
+ raise ValueError("File not found: %s" % (self._value))
+ if self._action['template']:
+ fn = self._run.str2tmp(str(Text.cib(self._run.script, open(self._value).read())))
+ self._value = fn
+ self._run.copy_file(self._nodes, self._value, str(self._action['to']))
+
+ def _crm_do(self, act):
+ fn = self._run.str2tmp(_join_script_lines(self._value))
+ if config.core.debug:
+ args = '-d --wait --no'
+ else:
+ args = '--wait --no'
+ if self._action.get('force'):
+ args = args + ' --force'
+ self._run.call(None, 'crm %s %s %s' % (args, act, fn))
+
+ def crm(self):
+ """
+ input: crm command sequence
+ """
+ return self._crm_do('-f')
+
+ def cib(self):
+ "input: cli configuration script"
+ return self._crm_do('configure load update')
+
+ def install(self):
+ """
+ input: list of packages
+ or: map of <os>: <packages>
+ """
+ self._run.execute_shell(self._nodes or 'all', '''#!/usr/bin/env python3
+import crm_script
+import crm_init
+
+crm_init.install_packages(%s)
+crm_script.exit_ok(True)
+ ''' % (self._value))
+
+ def service(self):
+ values = []
+ for s in self._value:
+ for v in s.items():
+ values.append(v)
+ services = "\n".join([('crm_script.service%s' % repr(v)) for v in values])
+ self._run.execute_shell(self._nodes or 'all', '''#!/usr/bin/env python3
+import crm_script
+import crm_init
+
+%s
+crm_script.exit_ok(True)
+''' % (services))
+
+ def include(self):
+ """
+ Treated differently: at parse time,
+ the include actions should disappear
+ and be replaced with actions generated
+ from the include. Either from an included
+ script, or a cib generated from an agent
+ include.
+ """
+
+
+_actions = dict([(n, getattr(Actions, n)) for n in dir(Actions) if not n.startswith('_')])
+
+
+def _find_action(action):
+ """return name of action for action"""
+ for a in list(_actions.keys()):
+ if a in action:
+ return a
+ return None
+
+
+def _parse_yaml(scriptname, scriptfile):
+ data = None
+ try:
+ import yaml
+ with open(scriptfile) as f:
+ data = yaml.load(f, Loader=yaml.SafeLoader)
+ if isinstance(data, list):
+ data = data[0]
+ except ImportError as e:
+ raise ValueError("Failed to load yaml module: %s" % (e))
+ except Exception as e:
+ raise ValueError("Failed to parse script main: %s" % (e))
+
+ if data:
+ ver = data.get('version')
+ if ver is None or str(ver) != str(_script_version):
+ data = _upgrade_yaml(data)
+
+ if 'parameters' in data:
+ data['steps'] = [{'parameters': data['parameters']}]
+ del data['parameters']
+ elif 'steps' not in data:
+ data['steps'] = []
+ data['name'] = scriptname
+ data['dir'] = os.path.dirname(scriptfile)
+ return data
+
+
+def _rename(obj, key, to):
+ if key in obj:
+ obj[to] = obj[key]
+ del obj[key]
+
+
+def _upgrade_yaml(data):
+ """
+ Upgrade a parsed yaml document from
+ an older version.
+ """
+ if 'version' in data and data['version'] > _script_version:
+ raise ValueError("Unknown version (expected < %s, got %s)" % (_script_version, data['version']))
+
+ data['version'] = _script_version
+ data['category'] = data.get('category', 'Legacy')
+ _rename(data, 'name', 'shortdesc')
+ _rename(data, 'description', 'longdesc')
+
+ data['actions'] = data.get('steps', [])
+ paramstep = {'parameters': data.get('parameters', [])}
+ data['steps'] = [paramstep]
+ if 'parameters' in data:
+ del data['parameters']
+
+ for p in paramstep['parameters']:
+ _rename(p, 'description', 'shortdesc')
+ _rename(p, 'default', 'value')
+ if 'required' not in p:
+ p['required'] = 'value' not in p
+
+ for action in data['actions']:
+ _rename(action, 'name', 'shortdesc')
+
+ return data
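+# For example, a hypothetical legacy document
+#   {'name': 'Apache', 'description': '...', 'parameters': [...], 'steps': [...]}
+# upgrades to
+#   {'shortdesc': 'Apache', 'longdesc': '...', 'category': 'Legacy',
+#    'steps': [{'parameters': [...]}], 'actions': [<legacy steps>], 'version': _script_version}
+# with each legacy parameter's 'description' and 'default' keys renamed to
+# 'shortdesc' and 'value'.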
+
+
+_hawk_template_cache = {}
+
+
+def _parse_hawk_template(workflow, name, kind, step, actions):
+ """
+ Convert a hawk template into steps + a cib action
+ """
+ path = os.path.join(os.path.dirname(workflow), '../templates', kind + '.xml')
+ if path in _hawk_template_cache:
+ xml = _hawk_template_cache[path]
+ elif os.path.isfile(path):
+ xml = etree.parse(path).getroot()
+ logger.debug("Found matching template: %s", path)
+ _hawk_template_cache[path] = xml
+ else:
+ raise ValueError("Template does not exist: %s" % (path))
+
+ step['shortdesc'] = _strip(''.join(xml.xpath('./shortdesc/text()')))
+ step['longdesc'] = ''.join(xml.xpath('./longdesc/text()'))
+
+ actions.append({'cib': _hawk_to_handles(name, xml.xpath('./crm_script')[0])})
+
+ for item in xml.xpath('./parameters/parameter'):
+ obj = {}
+ obj['name'] = item.get('name')
+ obj['required'] = item.get('required', False)
+ content = next(item.iter('content'))
+ obj['type'] = content.get('type', 'string')
+ val = content.get('default', content.get('value', None))
+ if val:
+ obj['value'] = val
+ obj['shortdesc'] = _strip(''.join(item.xpath('./shortdesc/text()')))
+ obj['longdesc'] = ''.join(item.xpath('./longdesc/text()'))
+ step['parameters'].append(obj)
+
+
+def _mkhandle(pfx, scope, text):
+ if scope:
+ return '{{%s%s:%s}}' % (pfx, scope, text)
+ else:
+ return '{{%s%s}}' % (pfx, text)
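+# e.g. _mkhandle('#', 'vip', 'ip') == '{{#vip:ip}}', _mkhandle('', '', 'id') == '{{id}}'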
+
+
+def _hawk_to_handles(context, tag):
+ """
+ input: a context name to prefix variable references with (may be empty)
+ and a crm_script tag
+ output: text with {{handles}}
+ """
+ s = ""
+ s += tag.text
+ for c in tag:
+ if c.tag == 'if':
+ cond = c.get('set')
+ if cond:
+ s += _mkhandle('#', context, cond)
+ s += _hawk_to_handles(context, c)
+ s += _mkhandle('/', context, cond)
+ elif c.tag == 'insert':
+ param = c.get('param')
+ src = c.get('from_template') or context
+ s += _mkhandle('', src, param)
+ s += c.tail or ""
+ return s
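+# For example, given the context 'vip', a hypothetical template fragment
+#   <crm_script>primitive vip IPaddr2 <insert param="ip"/></crm_script>
+# translates to: primitive vip IPaddr2 {{vip:ip}}
+# while an <if set="x"> child wraps its translated body in {{#vip:x}}...{{/vip:x}}.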
+
+
+def _parse_hawk_workflow(scriptname, scriptfile):
+ """
+ Reads a hawk workflow into a script.
+
+ TODO: Parse hawk workflows that invoke legacy cluster scripts?
+ """
+ xml = etree.parse(scriptfile).getroot()
+ if xml.tag != "workflow":
+ raise ValueError("Not a hawk workflow: %s" % (scriptfile))
+ data = {
+ 'version': 2.2,
+ 'name': scriptname,
+ 'shortdesc': _strip(''.join(xml.xpath('./shortdesc/text()'))),
+ 'longdesc': ''.join(xml.xpath('./longdesc/text()')),
+ 'category': ''.join(xml.xpath('./@category')) or 'Wizard',
+ 'dir': None,
+ 'steps': [],
+ 'actions': [],
+ }
+
+ # the parameters together form a step with an optional shortdesc
+ # then each template becomes an additional step with an optional shortdesc
+ paramstep = {
+ 'shortdesc': _strip(''.join(xml.xpath('./parameters/stepdesc/text()'))),
+ 'parameters': []
+ }
+ data['steps'].append(paramstep)
+ for item in xml.xpath('./parameters/parameter'):
+ obj = {}
+ obj['name'] = item.get('name')
+ obj['required'] = item.get('required', False)
+ obj['unique'] = item.get('unique', False)
+ content = next(item.iter('content'))
+ obj['type'] = content.get('type', 'string')
+ val = content.get('default', content.get('value', None))
+ if val is not None:
+ obj['value'] = val
+ obj['shortdesc'] = _strip(''.join(item.xpath('./shortdesc/text()')))
+ obj['longdesc'] = ''.join(item.xpath('./longdesc/text()'))
+ paramstep['parameters'].append(obj)
+
+ data['actions'] = []
+
+ for item in xml.xpath('./templates/template'):
+ templatestep = {
+ 'shortdesc': _strip(''.join(item.xpath('./stepdesc/text()'))),
+ 'name': item.get('name'),
+ # Optional steps in the legacy wizards was broken (!?)
+ 'required': True, # item.get('required'),
+ 'parameters': []
+ }
+ data['steps'].append(templatestep)
+
+ _parse_hawk_template(scriptfile, item.get('name'), item.get('type', item.get('name')),
+ templatestep, data['actions'])
+ for override in item.xpath('./override'):
+ name = override.get("name")
+ for param in templatestep['parameters']:
+ if param['name'] == name:
+ param['value'] = override.get("value")
+ param['required'] = False
+ break
+
+ data['actions'].append({'cib': _hawk_to_handles('', xml.xpath('./crm_script')[0])})
+
+ if config.core.debug:
+ import pprint
+ print("Parsed hawk workflow:")
+ pprint.pprint(data)
+ return data
+
+
+def build_script_cache():
+ global _script_cache
+ if _script_cache is not None:
+ return
+ _script_cache = {}
+ for d in _script_dirs():
+ if d:
+ for s in glob(os.path.join(d, '*/main.yml')):
+ name = os.path.dirname(s).split('/')[-1]
+ if name not in _script_cache:
+ _script_cache[name] = os.path.join(d, s)
+ for s in glob(os.path.join(d, '*.yml')):
+ name = os.path.splitext(os.path.basename(s))[0]
+ if name not in _script_cache:
+ _script_cache[name] = os.path.join(d, s)
+ for s in glob(os.path.join(d, 'workflows/*.xml')):
+ name = os.path.splitext(os.path.basename(s))[0]
+ if name not in _script_cache:
+ _script_cache[name] = os.path.join(d, s)
+
+
+def list_scripts():
+ '''
+ List the available cluster installation scripts.
+ Yields the names of the main script files.
+ '''
+ build_script_cache()
+ return sorted(_script_cache.keys())
+
+
+def _meta_text(meta, tag):
+ for c in meta.iterchildren(tag):
+ return c.text
+ return ''
+
+
+def _listfind(needle, haystack, keyfn):
+ for x in haystack:
+ if keyfn(x) == needle:
+ return x
+ return None
+
+
+def _listfindpend(needle, haystack, keyfn, orfn):
+ for x in haystack:
+ if keyfn(x) == needle:
+ return x
+ x = orfn()
+ haystack.append(x)
+ return x
+
+
+def _make_cib_for_agent(name, agent, data, ops):
+ aid = "{{%s:id}}" % (name) if name else "{{id}}"
+ template = ['primitive %s %s' % (aid, agent)]
+ params = []
+ ops = [op.strip() for op in ops.split('\n') if op.strip()]
+ for param in data['parameters']:
+ paramname = param['name']
+ if paramname == 'id':
+ # FIXME: What if the resource actually has a parameter named id?
+ continue
+ path = ':'.join((name, paramname)) if name else paramname
+ params.append('{{#%s}}%s="{{%s}}"{{/%s}}' % (path, paramname, path, path))
+ ret = '\n\t'.join(template + params + ops)
+ return ret
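+# A sketch of the output for a hypothetical step named 'vip' with a single 'ip'
+# parameter, agent 'ocf:heartbeat:IPaddr2' and no ops (the 'id' parameter maps
+# to the primitive identifier itself):
+#   primitive {{vip:id}} ocf:heartbeat:IPaddr2
+#           {{#vip:ip}}ip="{{vip:ip}}"{{/vip:ip}}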
+
+
+def _merge_objects(o1, o2):
+ for key, value in o2.items():
+ o1[key] = value
+
+
+def _lookup_step(name, steps, stepname):
+ for step in steps:
+ if step.get('name', '') == stepname:
+ return step
+ if not stepname and len(steps) == 1:
+ return steps[0]
+ if not stepname:
+ raise ValueError("Parameter '%s' not found" % (name))
+ raise ValueError("Referenced step '%s' not found in '%s'" % (stepname, name))
+
+
+def _process_agent_include(script, include):
+ from . import ra
+ agent = include['agent']
+ info = ra.get_ra(agent)
+ meta = info.meta()
+ if meta is None:
+ raise ValueError("No meta-data for agent: %s" % (agent))
+ name = include.get('name', meta.get('name'))
+ if not name:
+ cls, provider, name = ra.disambiguate_ra_type(agent)
+ if 'name' not in include:
+ include['name'] = name
+ step = _listfindpend(name, script['steps'], lambda x: x.get('name'), lambda: {
+ 'name': name,
+ 'longdesc': '',
+ 'shortdesc': '',
+ 'parameters': [],
+ })
+ step['longdesc'] = include.get('longdesc') or _meta_text(meta, 'longdesc')
+ step['shortdesc'] = _strip(include.get('shortdesc') or _meta_text(meta, 'shortdesc'))
+ step['required'] = include.get('required', True)
+ step['parameters'].append({
+ 'name': 'id',
+ 'shortdesc': 'Identifier for the cluster resource',
+ 'longdesc': '',
+ 'required': True,
+ 'unique': True,
+ 'type': 'resource',
+ })
+
+ def newparamobj(param):
+ pname = param.get('name')
+ return _listfindpend(pname, step['parameters'], lambda x: x.get('name'), lambda: {'name': pname})
+
+ for param in meta.xpath('./parameters/parameter'):
+ pobj = newparamobj(param)
+ pobj['required'] = _make_boolean(param.get('required', False))
+ pobj['unique'] = _make_boolean(param.get('unique', False))
+ pobj['longdesc'] = _meta_text(param, 'longdesc')
+ pobj['shortdesc'] = _strip(_meta_text(param, 'shortdesc'))
+ # set 'advanced' flag on all non-required agent parameters by default
+ # a UI should hide these parameters unless "show advanced" is set
+ pobj['advanced'] = not pobj['required']
+ ctype = param.xpath('./content/@type')
+ cexample = param.xpath('./content/@default')
+ if ctype:
+ pobj['type'] = ctype[0]
+ if cexample:
+ pobj['example'] = cexample[0]
+
+ for param in include.get('parameters', []):
+ pobj = newparamobj(param)
+ # Make any overriden parameters non-advanced
+ # unless explicitly set to advanced
+ pobj['advanced'] = False
+ for key, value in param.items():
+ if key in ('shortdesc', 'longdesc'):
+ pobj[key] = value
+ elif key == 'value':
+ pobj[key] = Text(script, value)
+ else:
+ pobj[key] = value
+ if 'value' in pobj:
+ pobj['required'] = False
+
+ # If the script doesn't have any base parameters
+ # and the name of this step is the same as the
+ # script name itself, then make this the base step
+ hoist = False
+ hoist_from = None
+ if step['name'] == script['name']:
+ zerostep = _listfind('', script['steps'], lambda x: x.get('name', ''))
+ if not zerostep:
+ hoist = True
+ elif zerostep.get('parameters'):
+ zp = zerostep['parameters']
+ for pname in [p['name'] for p in step['parameters']]:
+ if _listfind(pname, zp, lambda x: x['name']):
+ break
+ else:
+ hoist, hoist_from = True, zerostep
+
+ # use step['name'] here in case we did the zerostep hoist
+ step['value'] = Text.cib(script, _make_cib_for_agent('' if hoist else step['name'],
+ agent, step, include.get('ops', '')))
+
+ if hoist:
+ step['name'] = ''
+ if hoist_from:
+ step['parameters'] = hoist_from['parameters'] + step['parameters']
+ script['steps'] = [s for s in script['steps'] if s != hoist_from]
+
+ if not step['name']:
+ del step['name']
+
+ # this works despite possible hoist above,
+ # since name is still the actual name
+ for action in script['actions']:
+ if 'include' in action and action['include'] == name:
+ del action['include']
+ action['cib'] = step['value']
+
+
+def _process_script_include(script, include):
+ script_name = include['script']
+ if 'name' not in include:
+ include['name'] = script_name
+ subscript = load_script(script_name)
+ name = include['name']
+
+ scriptstep = {
+ 'name': name,
+ 'shortdesc': subscript['shortdesc'],
+ 'longdesc': subscript['longdesc'],
+ 'required': _make_boolean(include.get('required', True)),
+ 'steps': deepcopy(subscript['steps']),
+ 'sub-script': subscript,
+ }
+
+ def _merge_step_params(step, params):
+ for param in params:
+ _merge_step_param(step, param)
+
+ def _merge_step_param(step, param):
+ for p in step.get('parameters', []):
+ if p['name'] == param['name']:
+ for key, value in param.items():
+ if key in ('shortdesc', 'longdesc'):
+ p[key] = value
+ elif key == 'value' and Text.isa(value):
+ p[key] = Text(script, value)
+ else:
+ p[key] = value
+ if 'value' in p:
+ p['required'] = False
+ break
+ else:
+ raise ValueError("Referenced parameter '%s' not found in '%s'" % (param['name'], name))
+
+ for incparam in include.get('parameters', []):
+ if 'step' in incparam and 'name' not in incparam:
+ _merge_step_params(_lookup_step(name, scriptstep.get('steps', []), incparam['step']),
+ incparam['parameters'])
+ else:
+ _merge_step_param(_lookup_step(name, scriptstep.get('steps', []), ''),
+ incparam)
+
+ script['steps'].append(scriptstep)
+
+
+def _process_include(script, include):
+ """
+ includes add parameter steps and actions
+ an agent include works like a hawk template:
+ it adds a parameter step
+ a script include however adds any number of
+ parameter steps and actions
+
+ OK. here's what to do: Don't rescope the steps
+ and actions. Instead, keep the actions attached
+ to script step 0, as above. And for each step, add
+ a scope which states its scope. Then, when evaluating
+ handles, build custom environments for those scopes to
+ pass into handles.parse.
+
+ This is just for scripts, no need to do this for agents.
+ Of course, how about scripts that include other scripts?
+ _scope has to be a list which gets expanded...
+ """
+ if 'agent' in include:
+ return _process_agent_include(script, include)
+
+ elif 'script' in include:
+ return _process_script_include(script, include)
+ else:
+ raise ValueError("Unknown include type: %s" % (', '.join(list(include.keys()))))
+
+
+def _postprocess_script_step(script, step):
+ if 'name' in step and not step['name']:
+ del step['name']
+ step['required'] = _make_boolean(step.get('required', True))
+ step['shortdesc'] = _strip(step.get('shortdesc', ''))
+ step['longdesc'] = step.get('longdesc', '')
+ for p in step.get('parameters', []):
+ if 'name' not in p:
+ raise ValueError("Parameter has no name: %s" % (list(p.keys())))
+ p['shortdesc'] = _strip(p.get('shortdesc', ''))
+ p['longdesc'] = p.get('longdesc', '')
+ if 'default' in p and 'value' not in p:
+ p['value'] = p['default']
+ del p['default']
+ if 'value' in p:
+ if p['value'] is None:
+ del p['value']
+ elif isinstance(p['value'], str):
+ p['value'] = Text(script, p['value'])
+ if 'required' not in p:
+ p['required'] = False
+ else:
+ p['required'] = _make_boolean(p['required'])
+ if 'advanced' in p:
+ p['advanced'] = _make_boolean(p['advanced'])
+ else:
+ p['advanced'] = False
+ if 'unique' in p:
+ p['unique'] = _make_boolean(p['unique'])
+ else:
+ p['unique'] = False
+ if 'type' not in p or p['type'] == '':
+ if p['name'] == 'id':
+ p['type'] = 'resource'
+ else:
+ p['type'] = 'string'
+ for s in step.get('steps', []):
+ _postprocess_script_step(script, s)
+
+
+def _postprocess_script_steps(script):
+ def empty(step):
+ if 'parameters' in step and len(step['parameters']) > 0:
+ return False
+ if 'steps' in step and len(step['steps']) > 0:
+ return False
+ return True
+
+ script['steps'] = [step for step in script['steps'] if not empty(step)]
+
+ for step in script['steps']:
+ _postprocess_script_step(script, step)
+
+
+def _postprocess_script(script):
+ """
+ Post-process the parsed script into an executable
+ form. This means parsing all included agents and
+ scripts, merging parameters, steps and actions.
+ """
+ ver = script.get('version')
+ if ver is None or str(ver) != str(_script_version):
+ raise ValueError("Unsupported script version (expected %s, got %s)" % (_script_version, repr(ver)))
+
+ if 'category' not in script:
+ script['category'] = 'Custom'
+
+ if 'actions' not in script:
+ script['actions'] = []
+
+ # if we include subscripts but have no defined actions, assume that's a
+ # mistake and generate include actions for all includes
+ for inc in [{"include": inc['name']} for inc in script.get('include', [])]:
+ script['actions'].append(inc)
+
+ _postprocess_script_steps(script)
+
+ # Includes may add steps, or modify parameters,
+ # but assume that any included data is already
+ # postprocessed. To run this before the
+ # step processing would risk replacing Text() objects
+ # with references to other scripts with references
+ # to this script.
+ for inc in script.get('include', []):
+ _process_include(script, inc)
+
+ for action in script['actions']:
+ if 'include' in action:
+ includes = [inc['name'] for inc in script.get('include', [])]
+ if action['include'] not in includes:
+ raise ValueError("Script references '%s', but only includes: %s" %
+ (action['include'], ', '.join(includes)))
+
+ if 'include' in script:
+ del script['include']
+
+ def _setdesc(name):
+ desc = script.get(name)
+ if desc is None:
+ desc = ''
+ if not desc:
+ if script['steps'] and script['steps'][0][name]:
+ desc = script['steps'][0][name]
+ script['steps'][0][name] = ''
+ script[name] = desc
+ _setdesc('shortdesc')
+ _setdesc('longdesc')
+
+ return script
+
+
+def _join_script_lines(txt):
+ s = ""
+ current_line = ""
+ for line in txt.split('\n'):
+ if not line.strip():
+ pass
+ elif re.match(r'^\s+\S', line):
+ current_line += line
+ else:
+ if current_line.strip():
+ s += current_line + "\n"
+ current_line = line
+ if current_line:
+ s += current_line + "\n"
+ return s
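+# For example (hypothetical input), whitespace-indented continuation lines are
+# folded into the preceding line and blank lines are dropped:
+#   _join_script_lines('primitive p Dummy\n  op monitor interval=10\nproperty x=1')
+#   == 'primitive p Dummy  op monitor interval=10\nproperty x=1\n'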
+
+
+def load_script_file(script, filename):
+ parsed = None
+ if filename.endswith('.yml'):
+ parsed = _parse_yaml(script, filename)
+ elif filename.endswith('.xml'):
+ parsed = _parse_hawk_workflow(script, filename)
+ if parsed is None:
+ raise ValueError("Failed to parse script: %s (%s)" % (script, filename))
+ obj = _postprocess_script(parsed)
+ if 'name' in obj:
+ script = obj['name']
+ if script not in _script_cache or isinstance(_script_cache[script], str):
+ _script_cache[script] = obj
+ return obj
+
+
+def load_script_string(script, yml):
+ build_script_cache()
+ import io
+ import yaml
+ data = yaml.load(io.StringIO(yml), Loader=yaml.SafeLoader)
+ if isinstance(data, list):
+ data = data[0]
+ if 'parameters' in data:
+ data['steps'] = [{'parameters': data['parameters']}]
+ del data['parameters']
+ elif 'steps' not in data:
+ data['steps'] = []
+ data['name'] = script
+ data['dir'] = None
+
+ obj = _postprocess_script(data)
+ if 'name' in obj:
+ script = obj['name']
+ _script_cache[script] = obj
+ return obj
+
+
+def load_script(script):
+ build_script_cache()
+ if script not in _script_cache:
+ logger.debug("cache: %s", list(_script_cache.keys()))
+ raise ValueError("Script not found: %s" % (script))
+ s = _script_cache[script]
+ if isinstance(s, str):
+ try:
+ return load_script_file(script, s)
+ except KeyError as err:
+ raise ValueError("Error when loading script %s: Expected key %s not found" % (script, err))
+ except Exception as err:
+ raise ValueError("Error when loading script %s: %s" % (script, err))
+ return s
+
+
+def _script_dirs():
+ "list of directories that may contain cluster scripts"
+ ret = [d for d in options.scriptdir.split(';') if d and os.path.isdir(d)]
+ return ret + [os.path.join(userdir.CONFIG_HOME, 'scripts'),
+ os.path.join(config.path.sharedir, 'scripts'),
+ config.path.hawk_wizards]
+
+
+def _check_control_persist():
+ '''
+ Checks if ControlPersist is available. If so,
+ we'll use it to make things faster.
+ '''
+ cmd = 'ssh -o ControlPersist'.split()
+ if options.regression_tests:
+ print((".EXT", cmd))
+ cmd = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ (out, err) = cmd.communicate()
+ return "Bad configuration option" not in err
+
+
+def _parallax_call(printer, hosts, cmd, timeout_seconds):
+ "parallax.call with debug logging"
+ printer.debug("parallax.call(%s, %s)" % (repr(hosts), cmd))
+ return prun.prun({host: cmd for host, _, _ in hosts}, timeout_seconds=timeout_seconds)
+
+
+def _resolve_script(name):
+ for p in list_scripts():
+ if p.endswith('main.yml') and os.path.dirname(p).endswith('/' + name):
+ return p
+ elif p.endswith('.yml') and os.path.splitext(os.path.basename(p))[0] == name:
+ return p
+ elif p.endswith('.xml') and os.path.splitext(os.path.basename(p))[0] == name:
+ return p
+ return None
+
+
+def _parallax_copy(printer, hosts, src, dst, timeout_seconds):
+ "parallax.copy with debug logging"
+ printer.debug("parallax.copy(%s, %s, %s)" % (repr(hosts), src, dst))
+ return prun.pcopy_to_remote(src, [x[0] for x in hosts], dst, recursive=True, timeout_seconds=timeout_seconds)
+
+
+def _tempname(prefix):
+ return '%s-%s%s' % (prefix,
+ hex(int(time.time()))[2:],
+ hex(random.randint(0, 2**48))[2:])
+
+
+def _generate_workdir_name():
+ '''
+ Generate a temporary folder name to use while
+ running the script
+ '''
+ # TODO: make use of /tmp configurable
+ basefile = _tempname('crm-tmp')
+ basetmp = os.path.join(utils.get_tempdir(), basefile)
+ if os.path.isdir(basetmp):
+ raise ValueError("Invalid temporary workdir %s" % (basetmp))
+ return basetmp
+
+
+def _print_debug(printer, local_node, hosts, workdir, timeout_seconds):
+ "Print debug output (if any)"
+ dbglog = os.path.join(workdir, 'crm_script.debug')
+ if hosts:
+ for host, result in _parallax_call(
+ printer, hosts,
+ "if [ -f '%s' ]; then cat '%s'; fi" % (dbglog, dbglog),
+ timeout_seconds
+ ).items():
+ if isinstance(result, crmsh.parallax.Error):
+ printer.error(host, result)
+ else:
+ printer.output(host, result.returncode, result.stdout, result.stderr)
+ if os.path.isfile(dbglog):
+ f = open(dbglog).read()
+ printer.output(local_node, 0, f, '')
+
+
+def _cleanup_local(workdir):
+ "clean up the local tmp dir"
+ if workdir and os.path.isdir(workdir):
+ cleanscript = os.path.join(workdir, 'crm_clean.py')
+ if os.path.isfile(cleanscript):
+ if subprocess.call([cleanscript, workdir], shell=False) != 0:
+ shutil.rmtree(workdir)
+ else:
+ shutil.rmtree(workdir)
+
+
+def _run_cleanup(printer, has_remote_actions, local_node, hosts, workdir, timeout_seconds):
+ "Clean up after the cluster script"
+ if has_remote_actions and hosts and workdir:
+ cleanscript = os.path.join(workdir, 'crm_clean.py')
+ for host, result in _parallax_call(printer, hosts,
+ "%s %s" % (cleanscript,
+ workdir),
+ timeout_seconds).items():
+ if isinstance(result, crmsh.parallax.Error):
+ printer.error(host, "Clean: %s" % (result))
+ else:
+ printer.output(host, result.returncode, result.stdout, result.stderr)
+ _cleanup_local(workdir)
+
+
+def _extract_localnode(hosts):
+ """
+ Remove the local node from the hosts list, so
+ we can treat it separately
+ """
+ this_node = utils.this_node()
+ hosts2 = []
+ local_node = None
+ for h, p, u in hosts:
+ if h != this_node:
+ hosts2.append((h, p, u))
+ else:
+ local_node = (h, p, u)
+ logger.debug("Local node: %s, Remote hosts: %s", local_node, ', '.join(h[0] for h in hosts2))
+ return local_node, hosts2
+
+
+# TODO: remove common params?
+# Pass them in a separate list of options?
+# Right now these names are basically reserved..
+def common_params():
+ "Parameters common to all cluster scripts"
+ return [('nodes', None, 'List of nodes to execute the script for'),
+ ('dry_run', 'no', 'If set, simulate execution only'),
+ ('action', None, 'If set, only execute a single action (index, as returned by verify)'),
+ ('statefile', None, 'When single-stepping, the state is saved in the given file'),
+ ('user', config.core.user or None, 'Run script as the given user'),
+ ('sudo', 'no',
+ 'If set, crm will prompt for a sudo password and use sudo when appropriate'),
+ ('port', None, 'Port to connect on'),
+ ('timeout', '600', 'Execution timeout in seconds')]
+
+
+def _common_param_default(name):
+ for param, default, _ in common_params():
+ if param == name:
+ return default
+ return None
+
+
+def _filter_dict(d, name, fn, *args):
+ 'filter the given element in the dict through the function fn'
+ d[name] = fn(d[name], *args)
+
+
+def _filter_nodes(nodes, user, port):
+ 'filter function for the nodes element'
+ if nodes:
+ nodes = nodes.replace(',', ' ').split()
+ else:
+ nodes = utils.list_cluster_nodes()
+ if not nodes:
+ raise ValueError("No hosts")
+ nodes = [(node, port or None, user or None) for node in nodes]
+ return nodes
+
+
+def _scoped_param(context, name):
+ if context:
+ return ':'.join(context) + ':' + name
+ return name
+
+
+def _find_by_name(params, name):
+ try:
+ return next(x for x in params if x.get('name') == name)
+ except StopIteration:
+ return None
+
+
+_IDENT_RE = re.compile(r'^([a-z0-9_#$-][^\s=]*)$', re.IGNORECASE)
+
+
+def is_valid_ipv4_address(address):
+ try:
+ socket.inet_pton(socket.AF_INET, address)
+ except AttributeError:
+ try:
+ socket.inet_aton(address)
+ except socket.error:
+ return False
+ return address.count('.') == 3
+ except socket.error: # not a valid address
+ return False
+
+ return True
+
+
+def is_valid_ipv6_address(address):
+ try:
+ socket.inet_pton(socket.AF_INET6, address)
+ except socket.error: # not a valid address
+ return False
+ return True
+
+# Types:
+# OCF types
+#
+# string
+# integer
+# boolean
+#
+# Propose to add
+# resource ==> a valid resource identifier
+# ip_address ==> a valid ipv4 or ipv6 address
+# ip_network ==> a valid ipv4 or ipv6 network (or address without /XX)
+# port ==> integer between 0 and 65535
+# email ==> a valid email address
+
+# node ==> name of a node in the cluster
+# select <value>, <value>, <value>, ... ==> any of the values in the list.
+# range <n> <m> ==> integer in range
+# rx <rx> ==> anything matching the regular expression.
+
+
+def _valid_integer(value):
+ try:
+ return True, int(str(value), base=0)
+ except ValueError:
+ return False, value
+
+
+def _valid_ip(value):
+ return is_valid_ipv4_address(value) or is_valid_ipv6_address(value)
+
+
+def _verify_type(param, value, errors):
+ if value is None:
+ value = ''
+ vtype = param.get('type')
+ if not vtype:
+ return value
+ elif vtype == 'integer':
+ ok, _ = _valid_integer(value)
+ if not ok:
+ errors.append("%s=%s is not an integer" % (param.get('name'), value))
+ elif vtype == 'string':
+ return value
+ elif vtype == 'boolean':
+ return "true" if _make_boolean(value) else "false"
+ elif vtype == 'resource':
+ try:
+ if not _IDENT_RE.match(value):
+ errors.append("%s=%s invalid resource identifier" % (param.get('name'), value))
+ except TypeError as e:
+ errors.append("%s=%s %s" % (param.get('name'), value, str(e)))
+ elif vtype == 'enum':
+ if 'values' not in param:
+ errors.append("%s=%s enum without list of values" % (param.get('name'), value))
+ else:
+ opts = param['values']
+ if isinstance(opts, str):
+ opts = opts.replace(',', ' ').split(' ')
+ for v in opts:
+ if value.lower() == v.lower():
+ return v
+ errors.append("%s=%s does not match '%s'" % (param.get('name'), value, "|".join(opts)))
+ elif vtype == 'ip_address':
+ if not _valid_ip(value):
+ errors.append("%s=%s is not an IP address" % (param.get('name'), value))
+ elif vtype == 'ip_network':
+ sp = value.rsplit('/', 1)
+ if len(sp) == 1:
+ if not _valid_ip(sp[0]):
+ errors.append("%s=%s is not a valid IP network" % (param.get('name'), value))
+ else:
+ ok, _ = _valid_integer(sp[1])
+ if not _valid_ip(sp[0]) or not ok:
+ errors.append("%s=%s is not a valid IP network" % (param.get('name'), value))
+ elif vtype == 'port':
+ ok, ival = _valid_integer(value)
+ if not ok:
+ errors.append("%s=%s is not a valid port" % (param.get('name'), value))
+ elif ival < 0 or ival > 65535:
+ errors.append("%s=%s is out of port range" % (param.get('name'), value))
+ elif vtype == 'email':
+ if not re.match(r'[^@]+@[^@]+', value):
+ errors.append("%s=%s is not a valid email address" % (param.get('name'), value))
+ else:
+ errors.append("%s=%s is unknown type %s" % (param.get('name'), value, vtype))
+ return value
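+# For example (hypothetical parameter descriptors and an errors list 'errs'):
+#   _verify_type({'name': 'p', 'type': 'port'}, '8080', errs) returns '8080' with no error,
+#   _verify_type({'name': 'b', 'type': 'boolean'}, 'yes', errs) returns 'true',
+#   _verify_type({'name': 'a', 'type': 'ip_address'}, 'no-ip', errs) appends an error to errs.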
+
+
+_NO_RESOLVE = object()
+
+
+def _resolve_direct(step, pname, pvalue, path, errors):
+ step_parameters = step.get('parameters', [])
+ step_steps = step.get('steps', [])
+ param = _find_by_name(step_parameters, pname)
+ if param is not None:
+ # resolved to a parameter... now verify the value type?
+ return _verify_type(param, pvalue, errors)
+ substep = _find_by_name(step_steps, pname)
+ if substep is not None:
+ # resolved to a step... recurse
+ return _resolve_params(substep, pvalue, path + [pname], errors)
+ return _NO_RESOLVE
+
+
+def _resolve_unnamed_step(step, pname, pvalue, path, errors):
+ step_steps = step.get('steps', [])
+ substep = _find_by_name(step_steps, '')
+ if substep is not None:
+ return _resolve_direct(substep, pname, pvalue, path, errors)
+ return _NO_RESOLVE
+
+
+def _resolve_single_step(step, pname, pvalue, path, errors):
+ step_steps = step.get('steps', [])
+ if len(step_steps) >= 1:
+ first_step = step_steps[0]
+ return _resolve_direct(first_step, pname, pvalue, path + [first_step.get('name')], errors)
+ return _NO_RESOLVE
+
+
+def _resolve_params(step, params, path, errors):
+ """
+ any parameter that doesn't resolve is an error
+ """
+ ret = {}
+
+ for pname, pvalue in params.items():
+ result = _resolve_direct(step, pname, pvalue, path, errors)
+ if result is not _NO_RESOLVE:
+ ret[pname] = result
+ continue
+
+ result = _resolve_unnamed_step(step, pname, pvalue, path, errors)
+ if result is not _NO_RESOLVE:
+ ret[pname] = result
+ continue
+
+ result = _resolve_single_step(step, pname, pvalue, path, errors)
+ if result is not _NO_RESOLVE:
+ stepname = step['steps'][0].get('name', '')
+ if stepname not in ret:
+ ret[stepname] = {}
+ ret[stepname][pname] = result
+ ret[pname] = result
+ continue
+
+ errors.append("Unknown parameter %s" % (':'.join(path + [pname])))
+
+ return ret
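+# For example, with a hypothetical script whose only step is named 'vip' and
+# declares a parameter 'ip', resolving {'ip': '10.0.0.10'} falls through to
+# _resolve_single_step and returns {'vip': {'ip': '10.0.0.10'}, 'ip': '10.0.0.10'}.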
+
+
+def _check_parameters(script, params):
+ '''
+ 1. Fill in values where none are supplied and there's a value
+ in the step data
+ 2. Check missing values
+ 3. For each input parameter: look it up and adjust the path
+ '''
+ errors = []
+ # params = deepcopy(params)
+ # recursively resolve parameters: report
+ # error if a parameter can't be resolved
+ # TODO: move "common params" out of the params dict completely
+ # pass as flags to command line
+
+ def _split_commons(params):
+ ret, cdict = {}, dict([(c, d) for c, d, _ in common_params()])
+ for key, value in params.items():
+ if key in cdict:
+ cdict[key] = value
+ else:
+ ret[key] = deepcopy(value)
+ return ret, cdict
+
+ params, commons = _split_commons(params)
+ params = _resolve_params(script, params, [], errors)
+
+ if errors:
+ raise ValueError('\n'.join(errors))
+
+ for key, value in commons.items():
+ params[key] = value
+
+ def _fill_values(path, into, source, srcreq):
+ """
+ Copy values into into while checking for missing required parameters.
+ If into has content, all required parameters ARE required, even if the
+ whole step is not required (since we're supplying it). This is checked
+ by checking if the step is not required, but there are some parameters
+ set by the user anyway.
+ """
+ if 'required' in source:
+ srcreq = (source['required'] and srcreq) or (into and srcreq)
+
+ for param in source.get('parameters', []):
+ if param['name'] not in into:
+ if 'value' in param:
+ into[param['name']] = param['value']
+ elif srcreq and param['required']:
+ errors.append(_scoped_param(path, param['name']))
+
+ for step in source.get('steps', []):
+ required = step.get('required', True)
+ if not required and step['name'] not in into:
+ continue
+ if not required and step['name'] in into and into[step['name']]:
+ required = True
+ if 'name' not in step:
+ _fill_values(path, into, step, required and srcreq)
+ else:
+ if step['name'] not in into:
+ into[step['name']] = {}
+ _fill_values(path + [step['name']], into[step['name']], step, required and srcreq)
+
+ _fill_values([], params, script, True)
+
+ if errors:
+ raise ValueError("Missing required parameter(s): %s" % (', '.join(errors)))
+
+ # if config.core.debug:
+ # from pprint import pprint
+ # print("Checked script parameters:")
+ # pprint(params)
+ return params
+
+
+def _handles_values(ret, script, params, subactions):
+ """
+ Generate a values structure that the handles
+ templates understands.
+ """
+ def _process(to, context, params):
+ """
+ to: level writing to
+ context: source step
+ params: values for step
+ """
+ for key, value in params.items():
+ if not isinstance(value, dict):
+ to[key] = value
+
+ for step in context.get('steps', []):
+ name = step.get('name', '')
+ if name:
+ if step['required'] or name in params:
+ obj = {}
+ vobj = handles.value(obj, '')
+ to[name] = vobj
+ subaction = None
+ if step.get('sub-script'):
+ subaction = subactions.get(step['sub-script']['name'])
+ if subaction and subaction[-1]['name'] == 'cib':
+ vobj.value = Text.cib(script, subaction[-1]['value'])
+ else:
+ vobj.value = Text.cib(script, step.get('value', vobj.value))
+
+ _process(obj, step, params.get(name, {}))
+ else:
+ _process(to, step, params)
+
+ _process(ret, script, params)
+
+
+def _has_remote_actions(actions):
+ """
+ True if any actions execute on remote nodes
+ """
+ for action in actions:
+ if action['name'] in ('collect', 'apply', 'install', 'service', 'copy'):
+ return True
+ if action.get('nodes') == 'all':
+ return True
+ return False
+
+
+def _flatten_parameters(steps):
+ pret = []
+ for step in steps:
+ stepname = step.get('name', '')
+ for param in step.get('parameters', []):
+ if stepname:
+ pret.append('%s:%s' % (stepname, param['name']))
+ else:
+ pret.append(param['name'])
+ return pret
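+# e.g. [{'name': 'vip', 'parameters': [{'name': 'ip'}]}, {'parameters': [{'name': 'id'}]}]
+# flattens to ['vip:ip', 'id']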
+
+
+def param_completion_list(name):
+ """
+ Returns completions for the given script
+ """
+ try:
+ script = load_script(name)
+ params = _flatten_parameters(script.get('steps', []))
+ return [p + '=' for p in params]
+ except Exception:
+ return []
+
+
+def _create_script_workdir(script, workdir):
+ "Create workdir and copy contents of scriptdir into it"
+ scriptdir = script['dir']
+ try:
+ if scriptdir is not None:
+ if os.path.basename(scriptdir) == script['name']:
+ cmd = ["mkdir", "-p", os.path.dirname(workdir)]
+ else:
+ cmd = ["mkdir", "-p", workdir]
+ if options.regression_tests:
+ print(".EXT", cmd)
+ if subprocess.call(cmd, shell=False) != 0:
+ raise ValueError("Failed to create temporary working directory")
+ # only copytree if script is a dir
+ if os.path.basename(scriptdir) == script['name']:
+ shutil.copytree(scriptdir, workdir)
+ else:
+ cmd = ["mkdir", "-p", workdir]
+ if options.regression_tests:
+ print(".EXT", cmd)
+ if subprocess.call(cmd, shell=False) != 0:
+ raise ValueError("Failed to create temporary working directory")
+ except (IOError, OSError) as e:
+ raise ValueError(e)
+
+
+def _copy_utils(dst):
+ '''
+ Copy run utils to the destination directory
+ '''
+ try:
+ import glob
+ for f in glob.glob(os.path.join(config.path.sharedir, 'utils/*.py')):
+ shutil.copy(f, dst)
+ except (IOError, OSError) as e:
+ raise ValueError(e)
+
+
+def _create_remote_workdirs(printer, hosts, path, timeout_seconds):
+ "Create workdirs on remote hosts"
+ ok = True
+ for host, result in _parallax_call(printer, hosts,
+ "mkdir -p %s" % (os.path.dirname(path)),
+ timeout_seconds).items():
+ if isinstance(result, crmsh.parallax.Error):
+ printer.error(host, "Start: %s" % (result))
+ ok = False
+ if not ok:
+ msg = "Failed to connect to one or more of these hosts via SSH: %s" % (
+ ', '.join(h[0] for h in hosts))
+ raise ValueError(msg)
+
+
+def _copy_to_remote_dirs(printer, hosts, path, timeout_seconds):
+ "Copy a local folder to same location on remote hosts"
+ ok = True
+ for host, exc in _parallax_copy(printer, hosts,
+ path,
+ path,
+ timeout_seconds).items():
+ if exc is not None:
+ printer.debug("_copy_to_remote_dirs failed: %s, %s" % (hosts, path))
+ printer.error(host, exc)
+ ok = False
+ if not ok:
+ raise ValueError("Failed when copying script data, aborting.")
+ return ok
+
+
+def _copy_local(printer, workdir, local_node, src, dst):
+ ok = True
+ if local_node and not src.startswith(workdir):
+ try:
+ if os.path.abspath(src) != os.path.abspath(dst):
+ if os.path.isfile(src):
+ shutil.copy(src, dst)
+ else:
+ shutil.copytree(src, dst)
+ except (IOError, OSError, shutil.Error) as e:
+ printer.error(local_node, e)
+ ok = False
+ return ok
+
+
+def _copy_to_all(printer, workdir, hosts, local_node, src, dst, timeout_seconds):
+ """
+ Copy src to dst both locally and remotely
+ """
+ ok = True
+ ret = _parallax_copy(printer, hosts, src, dst, timeout_seconds)
+ for host, exc in ret.items():
+ if exc is not None:
+ printer.error(host, exc)
+ ok = False
+ return ok and _copy_local(printer, workdir, local_node, src, dst)
+
+
+def _clean_parameters(params):
+ ret = []
+ for param in params:
+ rp = {}
+ for elem in ('name', 'required', 'unique', 'advanced', 'type', 'example'):
+ if elem in param:
+ rp[elem] = param[elem]
+ if 'shortdesc' in param:
+ rp['shortdesc'] = _strip(param['shortdesc'])
+ if 'longdesc' in param:
+ rp['longdesc'] = format_desc(param['longdesc'])
+ if 'value' in param:
+ val = param['value']
+ if isinstance(val, Text):
+ val = val.text
+ rp['value'] = val
+ ret.append(rp)
+ return ret
+
+
+def clean_steps(steps):
+ ret = []
+ for step in steps:
+ rstep = {}
+ if 'name' in step:
+ rstep['name'] = step['name']
+ if 'shortdesc' in step:
+ rstep['shortdesc'] = _strip(step['shortdesc'])
+ if 'longdesc' in step:
+ rstep['longdesc'] = format_desc(step['longdesc'])
+ if 'required' in step:
+ rstep['required'] = step['required']
+ if 'parameters' in step:
+ rstep['parameters'] = _clean_parameters(step['parameters'])
+ if 'steps' in step:
+ rstep['steps'] = clean_steps(step['steps'])
+ ret.append(rstep)
+ return ret
+
+
+def clean_run_params(params):
+ for key, value in params.items():
+ if isinstance(value, dict):
+ clean_run_params(value)
+ elif Text.isa(value):
+ params[key] = str(value)
+ return params
+
+
+def _chmodx(path):
+ "chmod +x <path>"
+ mode = os.stat(path).st_mode
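+ # copy each set read bit (0o444) down two positions to its execute bit (0o111)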
+ mode |= (mode & 0o444) >> 2
+ os.chmod(path, mode)
+
+
+class RunActions(object):
+ def __init__(self, printer, script, params, actions, local_node, hosts, workdir, timeout_seconds):
+ self.printer = printer
+ self.script = script
+ self.data = [clean_run_params(params)]
+ self.actions = actions
+ self.local_node = local_node
+ self.hosts = hosts
+ self.dry_run = params.get('dry_run', False)
+ self.sudo = params.get('sudo', False)
+ self.workdir = workdir
+ self.timeout_seconds = timeout_seconds
+ self.statefile = os.path.join(self.workdir, 'script.input')
+ self.dstfile = os.path.join(self.workdir, 'script.input')
+ self.sudo_pass = None
+ self.result = None
+ self.output = None
+ self.rc = False
+
+ def prepare(self, has_remote_actions):
+ if not self.dry_run:
+ _create_script_workdir(self.script, self.workdir)
+ json.dump(self.data, open(self.statefile, 'w'))
+ _copy_utils(self.workdir)
+ if has_remote_actions:
+ _create_remote_workdirs(self.printer, self.hosts, self.workdir, self.timeout_seconds)
+ _copy_to_remote_dirs(self.printer, self.hosts, self.workdir, self.timeout_seconds)
+ # make sure all path references are relative to the script directory
+ os.chdir(self.workdir)
+
+ def single_action(self, action_index, statefile):
+ self.statefile = statefile
+ try:
+ action_index = int(action_index) - 1
+ except ValueError:
+ raise ValueError("action parameter must be an index")
+ if action_index < 0 or action_index >= len(self.actions):
+ raise ValueError("action index out of range")
+
+ action = self.actions[action_index]
+ logger.debug("Execute: %s", action)
+ # if this is not the first action, load action data
+ if action_index != 0:
+ if not os.path.isfile(statefile):
+ raise ValueError("No state for action: %s" % (action_index))
+ self.data = json.load(open(statefile))
+ if Actions.needs_sudo(action):
+ self._check_sudo_pass()
+ result = self._run_action(action)
+ json.dump(self.data, open(self.statefile, 'w'))
+ return result
+
+ def all_actions(self):
+ # TODO: run asynchronously on remote nodes
+ # run on remote nodes
+ # run on local nodes
+ # TODO: wait for remote results
+ for action in self.actions:
+ if Actions.needs_sudo(action):
+ self._check_sudo_pass()
+ if not self._run_action(action):
+ return False
+ return True
+
+ def _update_state(self):
+ if self.dry_run:
+ return True
+ json.dump(self.data, open(self.statefile, 'w'))
+ return _copy_to_all(self.printer,
+ self.workdir,
+ self.hosts,
+ self.local_node,
+ self.statefile,
+ self.dstfile,
+ self.timeout_seconds)
+
+ def run_command(self, nodes, command, is_json_output):
+ "called by Actions"
+ cmdline = 'cd "%s"; ./%s' % (self.workdir, command)
+ if not self._update_state():
+ raise ValueError("Failed when updating input, aborting.")
+ self.call(nodes, cmdline, is_json_output)
+
+ def copy_file(self, nodes, src, dst):
+ if not self._is_local(nodes):
+ ok = _copy_to_all(self.printer,
+ self.workdir,
+ self.hosts,
+ self.local_node,
+ src,
+ dst,
+ self.timeout_seconds)
+ else:
+ ok = _copy_local(self.printer,
+ self.workdir,
+ self.local_node,
+ src,
+ dst)
+ self.result = '' if ok else None
+ self.rc = ok
+
+ def record_json(self):
+ "called by Actions"
+ if self.result is not None:
+ if not self.result:
+ self.result = {}
+ self.data.append(self.result)
+ self.rc = True
+ else:
+ self.rc = False
+
+ def validate_json(self):
+ "called by Actions"
+ if self.dry_run:
+ self.rc = True
+ return
+
+ if self.result is not None:
+ if not self.result:
+ self.result = ''
+ self.data.append(self.result)
+ if isinstance(self.result, dict):
+ for k, v in self.result.items():
+ self.data[0][k] = v
+ self.rc = True
+ else:
+ self.rc = False
+
+ def report_result(self):
+ "called by Actions"
+ if self.result is not None:
+ self.output = self.result
+ self.rc = True
+ else:
+ self.rc = False
+
+ def _run_action(self, action):
+ """
+ Execute a single action
+ """
+ method = _actions[action['name']]
+ self.printer.start(action)
+ try:
+ self.output = None
+ self.result = None
+ self.rc = False
+ method(Actions(self, action))
+ self.printer.finish(action, self.rc, self.output)
+ return self.rc
+ finally:
+ self.printer.flush()
+
+ def _check_sudo_pass(self):
+ if self.sudo and not self.sudo_pass and userdir.getuser() != 'root':
+ prompt = "sudo password: "
+ self.sudo_pass = getpass.getpass(prompt=prompt)
+
+ def _is_local(self, nodes):
+ islocal = False
+ if nodes == 'all':
+ pass
+ elif nodes == 'local':
+ islocal = True
+ elif nodes is not None and nodes != []:
+ islocal = nodes == [self.local_node_name()]
+ else:
+ islocal = True
+ self.printer.debug("is_local (%s): %s" % (nodes, islocal))
+ return islocal
+
+ def call(self, nodes, cmdline, is_json_output=False):
+ if cmdline.startswith("#!"):
+ self.execute_shell(nodes or 'all', cmdline)
+ else:
+ if not self._is_local(nodes):
+ self.result = self._process_remote(cmdline, is_json_output)
+ else:
+ self.result = self._process_local(cmdline, is_json_output)
+ self.rc = self.result not in (False, None)
+
+ def execute_shell(self, nodes, cmdscript):
+ """
+ execute the shell script...
+ """
+ cmdscript = str(cmdscript).rstrip() + '\n'
+ if self.dry_run:
+ self.printer.print_command(nodes, cmdscript)
+ self.result = ''
+ self.rc = True
+ return
+ elif config.core.debug:
+ self.printer.print_command(nodes, cmdscript)
+
+ tmpf = self.str2tmp(cmdscript)
+ _chmodx(tmpf)
+ if not self._is_local(nodes):
+ ok = _copy_to_remote_dirs(self.printer,
+ self.hosts,
+ tmpf,
+ self.timeout_seconds)
+ if not ok:
+ self.result = False
+ else:
+ cmdline = 'cd "%s"; %s' % (self.workdir, tmpf)
+ self.result = self._process_remote(cmdline, False)
+ else:
+ cmdline = 'cd "%s"; %s' % (self.workdir, tmpf)
+ self.result = self._process_local(cmdline, False)
+ self.rc = self.result not in (None, False)
+
+ def str2tmp(self, s):
+ """
+ Create a temporary file in the temp workdir
+ Returns path to file
+ """
+ fn = os.path.join(self.workdir, _tempname('str2tmp'))
+ if self.dry_run:
+ self.printer.print_command(self.local_node_name(), 'temporary file <<END\n%s\nEND\n' % (s))
+ return fn
+ elif config.core.debug:
+ self.printer.print_command(self.local_node_name(), 'temporary file <<END\n%s\nEND\n' % (s))
+ try:
+ with open(fn, "w") as f:
+ f.write(s)
+ if not s.endswith('\n'):
+ f.write("\n")
+ except IOError as msg:
+ self.printer.error(self.local_node_name(), "Write failed: %s" % (msg))
+ return
+ return fn
+
+ def _process_remote(self, cmdline, is_json_output):
+ """
+ Handle an action that executes on all nodes
+ """
+ ok = True
+ action_result = {}
+
+ if self.dry_run:
+ self.printer.print_command(self.hosts, cmdline)
+ return {}
+ elif config.core.debug:
+ self.printer.print_command(self.hosts, cmdline)
+
+ for host, result in _parallax_call(self.printer,
+ self.hosts,
+ cmdline,
+ self.timeout_seconds).items():
+ if isinstance(result, crmsh.parallax.Error):
+ self.printer.error(host, "Remote error: %s" % (result))
+ ok = False
+ else:
+ out = utils.to_ascii(result.stdout)
+ if result.returncode != 0:
+ self.printer.error(host, "Remote error (rc=%s) %s%s" % (result.returncode, out, result.stderr))
+ ok = False
+ elif is_json_output:
+ action_result[host] = json.loads(out)
+ else:
+ action_result[host] = out
+ if self.local_node:
+ ret = self._process_local(cmdline, False)
+ if ret is None:
+ ok = False
+ elif is_json_output:
+ action_result[self.local_node_name()] = json.loads(ret)
+ else:
+ action_result[self.local_node_name()] = ret
+ if ok:
+ self.printer.debug("Result: %s" % repr(action_result))
+ return action_result
+ return None
+
+ def _process_local(self, cmdline, is_json_output):
+ """
+ Handle an action that executes locally
+ """
+ if self.sudo_pass:
+ input_s = u'sudo: %s\n' % (self.sudo_pass)
+ else:
+ input_s = None
+ if self.dry_run:
+ self.printer.print_command(self.local_node_name(), cmdline)
+ return {}
+ elif config.core.debug:
+ self.printer.print_command(self.local_node_name(), cmdline)
+ rc, out, err = ShellUtils().get_stdout_stderr(cmdline, input_s=input_s, shell=True)
+ if rc != 0:
+ self.printer.error(self.local_node_name(), "Error (%d): %s" % (rc, err))
+ return None
+ self.printer.debug("Result(local): %s" % repr(out))
+ if is_json_output:
+ if out != '':
+ out = json.loads(out)
+ return out
+
+ def local_node_name(self):
+ if self.local_node:
+ return self.local_node[0]
+ return "localhost"
+
+
+def run(script, params, printer):
+ '''
+ Run the given script on the given set of hosts
+ name: a cluster script is a folder <name> containing a main.yml or main.xml file
+ params: a tree of parameters
+ printer: Object that receives and formats output
+ '''
+ workdir = _generate_workdir_name()
+ # pull out the actions to perform based on the actual
+ # parameter values (so discard actions conditional on
+ # conditions that are false)
+ params = _check_parameters(script, params)
+ user = params['user']
+ port = params['port']
+ _filter_dict(params, 'nodes', _filter_nodes, user, port)
+ _filter_dict(params, 'dry_run', _make_boolean)
+ _filter_dict(params, 'sudo', _make_boolean)
+ _filter_dict(params, 'statefile', lambda x: (x and os.path.abspath(x)) or x)
+ if config.core.debug:
+ params['debug'] = True
+ actions = _process_actions(script, params)
+ name = script['name']
+ hosts = params['nodes']
+ printer.print_header(script, params, hosts)
+ local_node, hosts = _extract_localnode(hosts)
+
+ dry_run = params.get('dry_run', False)
+
+ has_remote_actions = _has_remote_actions(actions)
+
+ try:
+ runner = RunActions(printer, script, params, actions, local_node, hosts, workdir, int(params['timeout']))
+ runner.prepare(has_remote_actions)
+ action = params['action']
+ statefile = params['statefile']
+ if action or statefile:
+ if not action or not statefile:
+ raise ValueError("Must set both action and statefile")
+ return runner.single_action(action, statefile)
+ else:
+ return runner.all_actions()
+
+ except (OSError, IOError) as e:
+ import traceback
+ traceback.print_exc()
+ raise ValueError("Internal error while running %s: %s" % (name, e))
+ finally:
+ if not dry_run:
+ if not config.core.debug:
+ _run_cleanup(printer, has_remote_actions, local_node, hosts, workdir, int(params['timeout']))
+ elif has_remote_actions:
+ _print_debug(printer, local_node, hosts, workdir, int(params['timeout']))
+ else:
+ _print_debug(printer, local_node, None, workdir, int(params['timeout']))
+
+
+def _remove_empty_lines(txt):
+ return '\n'.join(line for line in txt.split('\n') if line.strip())
+
+
+def _process_actions(script, params):
+ """
+ Given parameter values, we can process
+ all the handles data and generate all the
+ actions to perform, validate and check conditions.
+ """
+
+ subactions = {}
+ values = {}
+ script['__values__'] = values
+
+ for step in script['steps']:
+ _handles_values(values, script, params, subactions)
+ if not step.get('required', True) and not params.get(step['name']):
+ continue
+ obj = step.get('sub-script')
+ if obj:
+ try:
+ subparams = params.get(step['name'], {})
+ subactions[step['name']] = _process_actions(obj, subparams)
+ except ValueError as err:
+ raise ValueError("Error in included script %s: %s" % (step['name'], err))
+
+ _handles_values(values, script, params, subactions)
+ actions = deepcopy(script['actions'])
+
+ ret = []
+ for action in actions:
+ name = _find_action(action)
+ if name is None:
+ raise ValueError("Unknown action: %s" % (list(action.keys())))
+ action['name'] = name
+ toadd = []
+ if name == 'include':
+ if action['include'] in subactions:
+ toadd.extend(subactions[action['include']])
+ else:
+ Actions.parse(script, action)
+ if 'when' in action:
+ when = str(action['when']).strip()
+ if when not in (False, None, '', 'false'):
+ toadd.append(action)
+ else:
+ toadd.append(action)
+ if ret:
+ for add in toadd:
+ if Actions.mergeable(add) and ret[-1]['name'] == add['name']:
+ if not Actions.merge(ret[-1], add):
+ ret.append(add)
+ else:
+ ret.append(add)
+ else:
+ ret.extend(toadd)
+ return ret
+
+
+def verify(script, params, external_check=True):
+ """
+ Verify the given parameter values, reporting
+ errors where such are detected.
+
+ Return a list of actions to perform.
+ """
+ params = _check_parameters(script, params)
+ actions = _process_actions(script, params)
+
+ if external_check and all(action['name'] == 'cib' for action in actions) and utils.is_program('crm'):
+ errors = set([])
+ cmd = ["cib new"]
+ for action in actions:
+ cmd.append(_join_script_lines(action['value']))
+ cmd.extend(["verify", "commit", "\n"])
+ try:
+ logger.debug("Try executing %s", "\n".join(cmd))
+ rc, out = utils.filter_string(['crm', '-f', '-', 'configure'], "\n".join(cmd).encode('utf-8'), stderr_on='stdout', shell=False)
+ errm = re.compile(r"^ERROR: \d+: (.*)$")
+ outp = []
+ for l in (out or "").splitlines():
+ m = errm.match(l)
+ if m:
+ errors.add(m.group(1))
+ else:
+ outp.append(l)
+ if rc != 0 and len(errors) == 0:
+ errors.add("Failed to verify (rc=%s): %s" % (rc, "\n".join(outp)))
+ except OSError as e:
+ errors.add(str(e))
+ if len(errors):
+ raise ValueError("\n".join(errors))
+
+ return actions
+
+
+def _make_boolean(v):
+ if isinstance(v, str):
+ return utils.get_boolean(v)
+ return v not in (0, False, None)
diff --git a/crmsh/service_manager.py b/crmsh/service_manager.py
new file mode 100644
index 0000000..3d51e94
--- /dev/null
+++ b/crmsh/service_manager.py
@@ -0,0 +1,97 @@
+import typing
+
+import crmsh.parallax
+import crmsh.sh
+
+
+class ServiceManager(object):
+ """
+ Class to manage systemctl services
+ """
+
+ def __init__(self, shell: crmsh.sh.ClusterShell = None):
+ self._shell = crmsh.sh.cluster_shell() if shell is None else shell
+
+ def service_is_available(self, name, remote_addr=None):
+ """
+ Check whether service is available
+ """
+ return 0 == self._run_on_single_host("systemctl list-unit-files '{}'".format(name), remote_addr)
+
+ def service_is_enabled(self, name, remote_addr=None):
+ """
+ Check whether service is enabled
+ """
+ return 0 == self._run_on_single_host("systemctl is-enabled '{}'".format(name), remote_addr)
+
+ def service_is_active(self, name, remote_addr=None):
+ """
+ Check whether service is active
+ """
+ return 0 == self._run_on_single_host("systemctl is-active '{}'".format(name), remote_addr)
+
+ def start_service(self, name, enable=False, remote_addr=None, node_list=[]):
+ """
+ Start service
+ Return success node list
+ """
+ if enable:
+ cmd = "systemctl enable --now '{}'".format(name)
+ else:
+ cmd = "systemctl start '{}'".format(name)
+ return self._call(remote_addr, node_list, cmd)
+
+ def _call(self, remote_addr: str, node_list: typing.List[str], cmd: str) -> typing.List[str]:
+ assert not (bool(remote_addr) and bool(node_list))
+ if len(node_list) == 1:
+ remote_addr = node_list[0]
+ node_list = list()
+ if node_list:
+ results = ServiceManager._call_with_parallax(cmd, node_list)
+ return [host for host, result in results.items() if isinstance(result, tuple) and result[0] == 0]
+ else:
+ rc = self._run_on_single_host(cmd, remote_addr)
+ if rc == 0:
+ return [remote_addr]
+ else:
+ return list()
+
+ def _run_on_single_host(self, cmd, host):
+ rc, _, _ = self._shell.get_rc_stdout_stderr_without_input(host, cmd)
+ if rc == 255:
+ raise ValueError("Failed to run command on host {}: {}".format(host, cmd))
+ return rc
+
+ @staticmethod
+ def _call_with_parallax(cmd, host_list):
+ ret = crmsh.parallax.parallax_run(host_list, cmd)
+        if isinstance(ret, crmsh.parallax.Error):
+            raise ret
+ return ret
+
+ def stop_service(self, name, disable=False, remote_addr=None, node_list=[]):
+ """
+ Stop service
+ Return success node list
+ """
+ if disable:
+ cmd = "systemctl disable --now '{}'".format(name)
+ else:
+ cmd = "systemctl stop '{}'".format(name)
+ return self._call(remote_addr, node_list, cmd)
+
+ def enable_service(self, name, remote_addr=None, node_list=[]):
+ """
+ Enable service
+ Return success node list
+ """
+ cmd = "systemctl enable '{}'".format(name)
+ return self._call(remote_addr, node_list, cmd)
+
+ def disable_service(self, name, remote_addr=None, node_list=[]):
+ """
+ Disable service
+ Return success node list
+ """
+ cmd = "systemctl disable '{}'".format(name)
+ return self._call(remote_addr, node_list, cmd)
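+
+
+# A minimal usage sketch (illustrative; the node names are placeholders):
+#
+#   mgr = ServiceManager()
+#   if mgr.service_is_available('pacemaker.service'):
+#       started_on = mgr.start_service('pacemaker.service', enable=True,
+#                                      node_list=['node1', 'node2'])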
diff --git a/crmsh/sh.py b/crmsh/sh.py
new file mode 100644
index 0000000..1571d39
--- /dev/null
+++ b/crmsh/sh.py
@@ -0,0 +1,479 @@
+"""Run shell commands.
+
+This module provides various methods to run shell commands, on both local and remote hosts, as current or another user.
+There many variant of the methods to allow fine-gain control of parameter passing and error handling.
+
+4 different implementations are provided:
+
+1. LocalShell allows to run command on local host as various users. It is the most feature-rich one, allowing
+ interactive I/O from/to the terminal.
+2. SSHShell allows to run command on both local and remote hosts. When running on a remote host, it creates a direct
+ connection from a specified local_user to the destination host and user. User input from terminal is not allowed, as
+ the command is passed through the stdin.
+3. ClusterShell runs command on cluster nodes. It leverages su, sudo and ssh to obtain a appreciated session on a
+ destination node. It is only available after ssh bootstrap as it depends on the knowledge about cluster node and user
+ configurations.
+4. ShellUtils runs command on local host as current user. It is a simple wrapper around subprocess module.
+
+The LocalShell and SshShell is expected to be used in ssh bootstrap. Once the ssh bootstrap finishes, AuthShell should
+be used.
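+
+A minimal usage sketch (illustrative; 'node2' is a placeholder and the
+ClusterShell call assumes the cluster has already been bootstrapped):
+
+    rc, out = ShellUtils().get_stdout('uname -r')
+    kernel = cluster_shell().get_stdout_or_raise_error('uname -r', host='node2')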
+"""
+import logging
+import os
+import pwd
+import re
+import socket
+import subprocess
+import typing
+from io import StringIO
+
+from . import constants
+from .pyshim import cache
+from . import user_of_host
+from .user_of_host import UserOfHost
+
+import crmsh.options
+
+logger = logging.getLogger(__name__)
+
+
+class Error(ValueError):
+ def __init__(self, msg, cmd):
+ super().__init__(msg)
+ self.cmd = cmd
+
+
+class AuthorizationError(Error):
+ def __init__(self, cmd: str, host: typing.Optional[str], user: str, msg: str):
+ super().__init__(
+ 'Failed to run command {cmd} on {optional_user}{host}: {msg} {diagnose}'.format(
+ optional_user=f'{user}@' if user is not None else '',
+ host=host, msg=msg, cmd=cmd,
+ diagnose=self.diagnose(),
+ ),
+ cmd
+ )
+ self.host = host
+ self.user = user
+
+ @staticmethod
+ def diagnose() -> str:
+ if user_of_host.instance().use_ssh_agent():
+ with StringIO() as buf:
+ if 'SSH_AUTH_SOCK' not in os.environ:
+ buf.write('Environment variable SSH_AUTH_SOCK does not exist.')
+ if 'SUDO_USER' in os.environ:
+ buf.write(' Please check whether ssh-agent is available and consider using "sudo --preserve-env=SSH_AUTH_SOCK".')
+                return buf.getvalue()
+        return ''
+
+
+class CommandFailure(Error):
+ def __init__(self, cmd: str, host: typing.Optional[str], user: typing.Optional[str], msg: str):
+ if host is None and user is None:
+ super().__init__("Failed to run '{}': {}".format(cmd, msg), cmd)
+ elif user is None:
+ super().__init__("Failed to run command on {}: '{}': {}".format(host, cmd, msg), cmd)
+ elif host is None:
+ super().__init__("Failed to run command as {}: '{}': {}".format(user, cmd, msg), cmd)
+ else:
+ super().__init__("Failed to run command as {}@{}: '{}': {}".format(user, host, cmd, msg), cmd)
+ self.host = host
+ self.user = user
+
+
+class Utils:
+ @staticmethod
+ def decode_str(x: bytes):
+ try:
+ return x.decode('utf-8')
+ except UnicodeDecodeError as e:
+ logger.debug('UTF-8 decode failure', exc_info=e)
+ return x.decode('utf-8', errors='backslashreplace')
+
+
+class LocalShell:
+ """Provides methods to run commands on localhost, both as current user and switching to another user"""
+ @staticmethod
+ @cache
+ def hostname():
+ return socket.gethostname()
+
+ @staticmethod
+ @cache
+ def geteuid() -> int:
+ return os.geteuid()
+
+ @staticmethod
+ @cache
+ def get_effective_user_name() -> str:
+ return pwd.getpwuid(LocalShell.geteuid()).pw_name
+
+ def __init__(self, additional_environ: typing.Dict[str, str] = None):
+ self.additional_environ = additional_environ
+ self.preserve_env = additional_environ.keys() if additional_environ is not None else None
+
+ def can_run_as(self, user: str):
+ return self.geteuid() == 0 or self.get_effective_user_name() == user
+
+ def su_subprocess_run(
+ self,
+ user: typing.Optional[str],
+ cmd: str,
+ tty=False,
+ **kwargs,
+ ):
+ """Call subprocess.run as another user.
+
+        This variant is the most flexible one, as it passes unknown kwargs through to the underlying
+        subprocess.run. However, it accepts only a command line string and not an argv list, as the argv is
+        used internally to switch the user.
+ """
+ if user is None or self.get_effective_user_name() == user:
+ args = ['/bin/sh', '-c', cmd]
+ elif 0 == self.geteuid():
+ args = ['su', user, '--login', '-s', '/bin/sh', '-c', cmd]
+ if tty:
+ args.append('--pty')
+ if self.preserve_env:
+ args.append('-w')
+ args.append(','.join(self.preserve_env))
+ else:
+ raise AuthorizationError(
+ cmd, None, user,
+ f"non-root user '{self.get_effective_user_name()}' cannot switch to another user"
+ )
+ if not self.additional_environ:
+ logger.debug('su_subprocess_run: %s, %s', args, kwargs)
+ return subprocess.run(args, **kwargs)
+ else:
+ logger.debug('su_subprocess_run: %s, env=%s, %s', args, self.additional_environ, kwargs)
+ env = dict(os.environ)
+ env.update(self.additional_environ)
+ return subprocess.run(args, env=env, **kwargs)
+
+ def get_rc_stdout_stderr_raw(self, user: typing.Optional[str], cmd: str, input: typing.Optional[bytes] = None):
+ result = self.su_subprocess_run(
+ user, cmd,
+ input=input,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ return result.returncode, result.stdout, result.stderr
+
+ def get_rc_stdout_stderr(self, user: typing.Optional[str], cmd: str, input: typing.Optional[str] = None):
+ rc, stdout, stderr = self.get_rc_stdout_stderr_raw(user, cmd, input.encode('utf-8') if input is not None else None)
+ return rc, Utils.decode_str(stdout).strip(), Utils.decode_str(stderr).strip()
+
+ def get_rc_and_error(
+ self,
+ user: typing.Optional[str],
+ cmd: str,
+ ) -> typing.Tuple[int, typing.Optional[str]]:
+ """Run a command for its side effects. Returns (rc, error_message)
+
+ If the return code is 0, outputs from the command will be ignored and (0, None) is returned.
+ If the return code is not 0, outputs from the stdout and stderr is combined as a single message.
+ """
+ result = self.su_subprocess_run(
+ user, cmd,
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ start_new_session=True,
+ )
+ if result.returncode == 0:
+ return 0, None
+ else:
+ return result.returncode, Utils.decode_str(result.stdout).strip()
+
+ def get_stdout_or_raise_error(
+ self,
+ user: typing.Optional[str],
+ cmd: str,
+ success_exit_status: typing.Optional[typing.Set[int]] = None,
+ ):
+ result = self.su_subprocess_run(
+ user, cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ to_raise = False
+ if success_exit_status is None:
+ if result.returncode != 0:
+ to_raise = True
+ else:
+ if result.returncode not in success_exit_status:
+ to_raise = True
+ if not to_raise:
+ return Utils.decode_str(result.stdout).strip()
+ else:
+ raise CommandFailure(cmd, None, user, Utils.decode_str(result.stderr).strip())
+
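+# A minimal usage sketch for LocalShell (illustrative; the command is a placeholder):
+#
+#   shell = LocalShell()
+#   rc, out, err = shell.get_rc_stdout_stderr('root', 'systemctl is-active sshd')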
+
+class SSHShell:
+ """Provides methods to run commands on both local and remote hosts as various users.
+
+ For remote commands, SSH sessions are created to the destination host and user from a specified local_user.
+ """
+ def __init__(self, local_shell: LocalShell, local_user):
+ self.local_shell = local_shell
+ self.local_user = local_user
+
+ def can_run_as(self, host: typing.Optional[str], user: str) -> bool:
+        # This method does not call subprocess_run_without_input, probably because some of its
+        # callers expect ssh to be used even if the destination host is localhost.
+ if host is None or host == self.local_shell.hostname():
+ return self.local_shell.can_run_as(user)
+ else:
+ ssh_options = "-o StrictHostKeyChecking=no -o EscapeChar=none -o ConnectTimeout=15"
+ ssh_cmd = "ssh {} -T -o Batchmode=yes {}@{} true".format(ssh_options, user, host)
+ rc, output = self.local_shell.get_rc_and_error(self.local_user, ssh_cmd)
+ return rc == 0
+
+ def get_rc_and_error(
+ self,
+ host: typing.Optional[str],
+ user: str,
+ cmd: str,
+ ) -> typing.Tuple[int, typing.Optional[str]]:
+ """Run a command for its side effects. Returns (rc, error_message)
+
+ If the return code is 0, outputs from the command will be ignored and (0, None) is returned.
+ If the return code is not 0, outputs from the stdout and stderr is combined as a single message.
+ """
+ result = self.subprocess_run_without_input(
+ host, user, cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ start_new_session=True,
+ )
+ if result.returncode == 0:
+ return 0, None
+ else:
+ return result.returncode, Utils.decode_str(result.stdout).strip()
+
+ def subprocess_run_without_input(self, host: typing.Optional[str], user: str, cmd: str, **kwargs):
+ assert 'input' not in kwargs and 'stdin' not in kwargs
+ if host is None or host == self.local_shell.hostname():
+ if user == self.local_shell.get_effective_user_name():
+ args = ['/bin/sh']
+ else:
+ args = ['sudo', '-H', '-u', user, '/bin/sh']
+ return subprocess.run(
+ args,
+ input=cmd.encode('utf-8'),
+ **kwargs,
+ )
+ else:
+ return self.local_shell.su_subprocess_run(
+ self.local_user,
+ 'ssh {} {}@{} /bin/sh'.format(constants.SSH_OPTION, user, host),
+ input=cmd.encode('utf-8'),
+ **kwargs,
+ )
+
+
+class ClusterShell:
+ """Provides methods to run commands on both local and remote cluster nodes.
+
+ For remote nodes, the local and remote user used for SSH sessions are determined from cluster configuration recorded
+ during bootstrap.
+ """
+ def __init__(
+ self,
+ local_shell: LocalShell,
+ user_of_host: UserOfHost,
+ forward_ssh_agent: bool = False,
+ raise_ssh_error: bool = False, # whether to raise AuthorizationError when ssh returns with 255
+ ):
+ self.local_shell = local_shell
+ self.user_of_host = user_of_host
+ self.forward_ssh_agent = forward_ssh_agent
+ self.raise_ssh_error = raise_ssh_error
+
+ def can_run_as(self, host: typing.Optional[str], user: str) -> bool:
+ try:
+ result = self.subprocess_run_without_input(
+ host, user, 'true',
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+ except user_of_host.UserNotFoundError:
+ return False
+ return 0 == result.returncode
+
+ def subprocess_run_without_input(self, host: typing.Optional[str], user: typing.Optional[str], cmd: str, **kwargs):
+ assert 'input' not in kwargs and 'stdin' not in kwargs
+ if host is None or host == self.local_shell.hostname():
+ if user is None:
+ return subprocess.run(
+ ['/bin/sh'],
+ input=cmd.encode('utf-8'),
+ **kwargs,
+ )
+ else:
+ return self.local_shell.su_subprocess_run(
+ user, cmd,
+ **kwargs,
+ )
+ else:
+ if user is None:
+ user = 'root'
+ local_user, remote_user = self.user_of_host.user_pair_for_ssh(host)
+ result = self.local_shell.su_subprocess_run(
+ local_user,
+                'ssh {} {} -o BatchMode=yes {}@{} sudo -H -u {} {} /bin/sh'.format(
+                    '-A' if self.forward_ssh_agent else '',
+                    constants.SSH_OPTION,
+                    remote_user,
+                    host,
+                    user,
+                    '--preserve-env=SSH_AUTH_SOCK' if self.forward_ssh_agent else '',
+                ),
+ input=cmd.encode('utf-8'),
+ start_new_session=True,
+ **kwargs,
+ )
+ if self.raise_ssh_error and result.returncode == 255:
+ raise AuthorizationError(cmd, host, remote_user, Utils.decode_str(result.stderr).strip())
+ else:
+ return result
+
+ def get_rc_and_error(self, host: typing.Optional[str], user: str, cmd: str):
+ result = self.subprocess_run_without_input(
+ host, user, cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+ if result.returncode == 0:
+ return 0, None
+ else:
+ return result.returncode, Utils.decode_str(result.stdout).strip()
+
+ def get_rc_stdout_stderr_raw_without_input(self, host, cmd) -> typing.Tuple[int, bytes, bytes]:
+ result = self.subprocess_run_without_input(
+ host, None, cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ return result.returncode, result.stdout, result.stderr
+
+ def get_rc_stdout_stderr_without_input(self, host, cmd) -> typing.Tuple[int, str, str]:
+ rc, stdout, stderr = self.get_rc_stdout_stderr_raw_without_input(host, cmd)
+ return rc, Utils.decode_str(stdout).strip(), Utils.decode_str(stderr).strip()
+
+ def get_stdout_or_raise_error(
+ self,
+ cmd: str,
+ host: typing.Optional[str] = None,
+ success_exit_status: typing.Optional[typing.Set[int]] = None,
+ ):
+ rc, stdout, stderr = self.get_rc_stdout_stderr_raw_without_input(host, cmd)
+ to_raise = False
+ if success_exit_status is None:
+ if rc != 0:
+ to_raise = True
+ else:
+ if rc not in success_exit_status:
+ to_raise = True
+ if not to_raise:
+ return Utils.decode_str(stdout).strip()
+ else:
+ raise CommandFailure(cmd, host, None, Utils.decode_str(stderr).strip())
+
+ def ssh_to_localhost(self, user: typing.Optional[str], cmd: str, **kwargs):
+ if user is None:
+ user = 'root'
+ host = self.local_shell.hostname()
+ local_user, remote_user = self.user_of_host.user_pair_for_ssh(host)
+ result = self.local_shell.su_subprocess_run(
+ local_user,
+            'ssh {} {} {}@{} sudo -H -u {} {} /bin/sh'.format(
+                '-A' if self.forward_ssh_agent else '',
+                constants.SSH_OPTION,
+                remote_user,
+                host,
+                user,
+                '--preserve-env=SSH_AUTH_SOCK' if self.forward_ssh_agent else '',
+            ),
+ input=cmd.encode('utf-8'),
+ **kwargs,
+ )
+ if self.raise_ssh_error and result.returncode == 255:
+ raise AuthorizationError(cmd, host, remote_user, Utils.decode_str(result.stderr).strip())
+ else:
+ return result
+
+
+class ShellUtils:
+ CONTROL_CHARACTER_PATTER = re.compile('[\u0000-\u001F]')
+
+ @classmethod
+ def get_stdout(cls, cmd, input_s=None, stderr_on=True, shell=True, raw=False):
+ '''
+ Run a cmd, return stdout output.
+ Optional input string "input_s".
+ stderr_on controls whether to show output which comes on stderr.
+ '''
+ if crmsh.options.regression_tests:
+ print(".EXT", cmd)
+ proc = subprocess.Popen(
+ cmd,
+ shell=shell,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.DEVNULL if stderr_on else subprocess.PIPE,
+ )
+ stdout_data, _ = proc.communicate(input_s)
+ if raw:
+ return proc.returncode, stdout_data
+ else:
+ if isinstance(stdout_data, bytes):
+ stdout_data = Utils.decode_str(stdout_data)
+ return proc.returncode, stdout_data.strip()
+
+ @classmethod
+ def get_stdout_stderr(cls, cmd, input_s=None, shell=True, raw=False, no_reg=False, timeout=None):
+ '''
+ Run a cmd, return (rc, stdout, stderr)
+ '''
+ if crmsh.options.regression_tests and not no_reg:
+ print(".EXT", cmd)
+ proc = subprocess.Popen(
+ cmd,
+ shell=shell,
+ stdin=input_s and subprocess.PIPE or None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ # will raise subprocess.TimeoutExpired if set timeout
+ stdout_data, stderr_data = proc.communicate(input_s, timeout=timeout)
+ if raw:
+ return proc.returncode, stdout_data, stderr_data
+ else:
+ if isinstance(stdout_data, bytes):
+ stdout_data = Utils.decode_str(stdout_data)
+ stderr_data = Utils.decode_str(stderr_data)
+ return proc.returncode, stdout_data.strip(), stderr_data.strip()
+
+
+class ClusterShellAdaptorForLocalShell(ClusterShell):
+ """A adaptor to wrap a LocalShell as a ClusterShell.
+
+ Some modules depend on shell and are called both during bootstrap and after bootstrap. Use a LocalShell as their
+ implementation in bootstrap make the difference more explicit, avoid dependency on outdated cluster configurations
+ (for example, the configurations left from previous cluster bootstrap) and help to catch errors in tests.
+ """
+ def __init__(self, local_shell: LocalShell):
+ super().__init__(local_shell, None)
+
+ def subprocess_run_without_input(self, host: str, user: typing.Optional[str], cmd: str, **kwargs):
+ assert 'input' not in kwargs and 'stdin' not in kwargs
+ assert host is None or host == self.local_shell.hostname()
+ if user is None:
+ user = 'root'
+ return self.local_shell.su_subprocess_run(user, cmd, **kwargs)
+
+
+def cluster_shell():
+ return ClusterShell(LocalShell(), user_of_host.instance(), raise_ssh_error=True)
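+
+
+# Illustrative error handling around cluster_shell(); the exception types are
+# the ones defined in this module and 'node2' is a placeholder:
+#
+#   try:
+#       out = cluster_shell().get_stdout_or_raise_error('crm_mon -1', host='node2')
+#   except AuthorizationError as e:
+#       print(e)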
diff --git a/crmsh/ssh_key.py b/crmsh/ssh_key.py
new file mode 100644
index 0000000..78e24b2
--- /dev/null
+++ b/crmsh/ssh_key.py
@@ -0,0 +1,267 @@
+import logging
+import os
+import pwd
+import re
+import subprocess
+import tempfile
+import typing
+from io import StringIO
+
+from crmsh import sh
+
+
+logger = logging.getLogger(__name__)
+
+
+class Error(ValueError):
+ def __init__(self, msg: str):
+ super().__init__(msg)
+
+
+class AgentNotAvailableError(Error):
+ def __init__(self, msg):
+ super().__init__(f'{msg}{self.diagnose()}')
+
+ @staticmethod
+ def diagnose() -> str:
+ with StringIO() as buf:
+ if 'SSH_AUTH_SOCK' not in os.environ:
+ buf.write(' Environment variable SSH_AUTH_SOCK does not exist.')
+ if 'SUDO_USER' in os.environ:
+ buf.write(' Please check whether ssh-agent is available and consider using "sudo --preserve-env=SSH_AUTH_SOCK".')
+ return buf.getvalue()
+
+
+class NoKeysInAgentError(Error):
+ def __init__(self, msg):
+ super().__init__(f'{msg}{self.diagnose()}')
+
+ @staticmethod
+ def diagnose() -> str:
+ ssh_auth_sock = os.environ["SSH_AUTH_SOCK"]
+ st = os.stat(ssh_auth_sock)
+ owner_name = pwd.getpwuid(st.st_uid).pw_name
+ return f' crmsh is using an ssh-agent listening at {ssh_auth_sock}, owned by {owner_name}. Please add at least one key pair with `ssh-add`'
+
+
+class Key:
+ def public_key(self) -> str:
+ raise NotImplementedError
+
+
+class KeyFile(Key):
+ def __init__(self, path: str):
+ self._path = os.path.realpath(path)
+ self._public_key = None
+
+ def public_key_file(self) -> typing.Optional[str]:
+ return self._path
+
+ def public_key(self) -> str:
+ if self._public_key:
+ return self._public_key
+ else:
+ with open(self._path, 'r', encoding='utf-8') as f:
+ self._public_key = f.read().strip()
+ return self._public_key
+
+ def __eq__(self, other):
+ return isinstance(other, KeyFile) and self._path == other._path and self.public_key() == other.public_key()
+
+ def __repr__(self):
+ return f'KeyFile(path={self._path}, key={self.public_key()})'
+
+
+class InMemoryPublicKey(Key):
+ def __init__(self, content: str):
+ self.content = content
+
+ def public_key(self) -> str:
+ return self.content
+
+ def __eq__(self, other):
+ return isinstance(other, InMemoryPublicKey) and self.content == other.content
+
+
+class AuthorizedKeyManager:
+ def __init__(self, shell: sh.SSHShell):
+ self._shell = shell
+
+ def add(self, host: typing.Optional[str], user: str, key: Key):
+ if host is None:
+ self._add_local(user, key)
+ else:
+ self._add_remote(host, user, key)
+
+ def _add_local(self, user: str, key: Key):
+ cmd = self._add_by_editing_file(user, key)
+ rc, output = self._shell.local_shell.get_rc_and_error(user, cmd)
+ if rc != 0:
+ # unlikely
+ raise Error(output)
+
+ def _add_remote(self, host: str, user: str, key: Key):
+ if self._shell.can_run_as(host, user):
+ shell_user = user
+ elif self._shell.can_run_as(host, 'root'):
+ shell_user = 'root'
+ else:
+ shell_user = None
+ if shell_user is not None:
+ cmd = self._add_by_editing_file(user, key)
+ rc, msg = self._shell.get_rc_and_error(host, shell_user, cmd)
+ if rc != 0:
+ raise Error(f'Failed configuring SSH passwordless with {user}@{host}: {msg}')
+ else:
+            user_info = pwd.getpwnam(user)
+            if (isinstance(key, KeyFile) and key.public_key_file() is not None
+                    and os.stat(key.public_key_file()).st_uid == user_info.pw_uid):
+                # the public key file is already owned by the target user; use it directly
+                self._add_by_ssh_copy_id(user, host, key.public_key_file())
+            else:
+                # otherwise stage the public key in a temporary file owned by the target user
+                with tempfile.NamedTemporaryFile('w', encoding='utf-8', suffix='.pub') as tmp:
+                    os.chown(tmp.fileno(), user_info.pw_uid, user_info.pw_gid)
+                    print(key.public_key(), file=tmp)
+                    tmp.flush()
+                    self._add_by_ssh_copy_id(user, host, tmp.name)
+
+ @classmethod
+ def _add_by_editing_file(cls, user: str, key: Key):
+ public_key = key.public_key()
+ dir = f'~{user}/.ssh'
+ file = f'{dir}/authorized_keys'
+ cmd = f'''if ! grep '{public_key}' {file} > /dev/null; then
+ if [ -s {file} ]; then
+ sed -i '$a {public_key}' {file}
+ else
+ mkdir -p {dir}
+ chown {user}: {dir}
+ chmod 0700 {dir}
+ echo '{public_key}' > {file}
+ chmod 0600 {file}
+ fi
+ chown {user}: {file}
+fi'''
+ return cmd
+
+ def _add_by_ssh_copy_id(self, user, host, key_path):
+ cmd = "ssh-copy-id -f -i '{}' '{}@{}' &> /dev/null".format(key_path, user, host)
+ logger.info("Configuring SSH passwordless with %s@%s", user, host)
+ result = self._shell.local_shell.su_subprocess_run(
+ self._shell.local_user, cmd,
+ tty=True,
+ )
+ if result.returncode != 0:
+ raise Error(f'Failed configuring SSH passwordless with {user}@{host}.')
+
+
+class AgentClient:
+ def __init__(self, socket_path: typing.Optional[str] = None):
+ if socket_path is None:
+ if 'SSH_AUTH_SOCK' not in os.environ:
+ raise AgentNotAvailableError("ssh-agent is not available.")
+ self.socket_path = None
+ else:
+ self.socket_path = socket_path
+ self.shell = sh.LocalShell(additional_environ={'SSH_AUTH_SOCK': self.socket_path} if self.socket_path else None)
+
+ def list(self) -> typing.List[Key]:
+ cmd = 'ssh-add -L'
+ rc, stdout, stderr = self.shell.get_rc_stdout_stderr(None, cmd)
+ if rc == 1:
+ raise NoKeysInAgentError(stderr)
+ elif rc == 2:
+ raise AgentNotAvailableError(stderr)
+ elif rc != 0:
+ raise sh.CommandFailure(cmd, None, None, stderr)
+ return [InMemoryPublicKey(line) for line in stdout.splitlines()]
+
+
+class KeyFileManager:
+    KNOWN_KEY_TYPES = ['rsa', 'ed25519', 'ecdsa']  # dsa is omitted because it is no longer considered secure
+ KNOWN_PUBLIC_KEY_FILENAME_PATTERN = re.compile('/id_(?:{})\\.pub$'.format('|'.join(KNOWN_KEY_TYPES)))
+
+ def __init__(self, shell: sh.ClusterShell):
+ self.cluster_shell = sh.ClusterShell(shell.local_shell, shell.user_of_host, raise_ssh_error=True)
+
+ def list_public_key_for_user(self, host: typing.Optional[str], user: str) -> typing.List[str]:
+ result = self.cluster_shell.subprocess_run_without_input(
+ host, user,
+ 'ls ~/.ssh/id_*.pub',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ if result.returncode != 0:
+ return list()
+ return [
+ filename
+ for filename in sh.Utils.decode_str(result.stdout).splitlines()
+ if self.KNOWN_PUBLIC_KEY_FILENAME_PATTERN.search(filename)
+ ]
+
+ def load_public_keys_for_user(self, host: typing.Optional[str], user: str) -> typing.List[InMemoryPublicKey]:
+ filenames = self.list_public_key_for_user(host, user)
+ if not filenames:
+ return list()
+        # the filenames returned by list_public_key_for_user are absolute paths, so cat them directly
+        cmd = f'cat {" ".join(filenames)}'
+ result = self.cluster_shell.subprocess_run_without_input(
+ host, user,
+ cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ if result.returncode != 0:
+ raise sh.CommandFailure(cmd, host, user, sh.Utils.decode_str(result.stderr).strip())
+ return [InMemoryPublicKey(line) for line in sh.Utils.decode_str(result.stdout).splitlines()]
+
+ def ensure_key_pair_exists_for_user(
+ self,
+ host: typing.Optional[str],
+ user: str,
+ ) -> typing.Tuple[bool, typing.List[InMemoryPublicKey]]:
+ """Ensure at least one keypair exists for the specified user. If it does not exist, generate a new one.
+
+ Return (is_generated, list_of_public_keys):
+
+ * is_generated: whether a new keypair is generated
+ * list_of_public_keys: all public keys of known types, including the newly generated one
+ """
+ script = '''if [ ! \\( {condition} \\) ]; then
+ ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -C "Cluster internal on $(hostname)" -N '' <> /dev/null
+ echo 'GENERATED=1'
+fi
+for file in ~/.ssh/id_{{{pattern}}}; do
+ if [ -f "$file" ]; then
+ if ! [ -f "$file".pub ]; then
+ ssh-keygen -y -f "$file" > "$file".pub
+ fi
+ cat "$file".pub
+ fi
+done
+'''.format(
+ condition=' -o '.join([f'-f ~/.ssh/id_{t}' for t in self.KNOWN_KEY_TYPES]),
+ pattern=','.join(self.KNOWN_KEY_TYPES),
+ )
+ result = self.cluster_shell.subprocess_run_without_input(
+ host, user,
+ script,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ if result.returncode != 0:
+            logger.debug('Script failed on %s@%s:\n%s\nstdout: %s', user, host, script, result.stdout)
+ raise sh.CommandFailure(f'Script({script[:16]}...) failed. rc = {result.returncode}', host, user, sh.Utils.decode_str(result.stderr).strip())
+ generated = False
+ keys = list()
+ for line in sh.Utils.decode_str(result.stdout).splitlines():
+ if line == 'GENERATED=1':
+ generated = True
+ else:
+ keys.append(InMemoryPublicKey(line))
+ return generated, keys
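+
+
+# A minimal usage sketch (illustrative; user and host names are placeholders,
+# and AgentClient() assumes a running ssh-agent):
+#
+#   local = sh.LocalShell()
+#   manager = AuthorizedKeyManager(sh.SSHShell(local, 'alice'))
+#   for key in AgentClient().list():
+#       manager.add('node2', 'alice', key)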
diff --git a/crmsh/template.py b/crmsh/template.py
new file mode 100644
index 0000000..376f64b
--- /dev/null
+++ b/crmsh/template.py
@@ -0,0 +1,183 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+import os
+import re
+from . import config
+from . import userdir
+from . import log
+
+
+logger = log.setup_logger(__name__)
+
+
+def get_var(l, key):
+ for s in l:
+ a = s.split()
+ if len(a) == 2 and a[0] == key:
+ return a[1]
+ return ''
+
+
+def chk_var(l, key):
+ for s in l:
+ a = s.split()
+ if len(a) == 2 and a[0] == key and a[1]:
+ return True
+ return False
+
+
+def chk_key(l, key):
+ for s in l:
+ a = s.split()
+ if len(a) >= 1 and a[0] == key:
+ return True
+ return False
+
+
+def validate_template(l):
+ 'Test for required stuff in a template.'
+ if not chk_var(l, '%name'):
+ logger.error("invalid template: missing '%name'")
+ return False
+ if not chk_key(l, '%generate'):
+ logger.error("invalid template: missing '%generate'")
+ return False
+ g = l.index('%generate')
+ if not (chk_key(l[0:g], '%required') or chk_key(l[0:g], '%optional')):
+ logger.error("invalid template: missing '%required' or '%optional'")
+ return False
+ return True
+
+
+def fix_tmpl_refs(l, ident, pfx):
+ for i, tmpl in enumerate(l):
+ l[i] = tmpl.replace(ident, pfx)
+
+
+def fix_tmpl_refs_re(l, regex, repl):
+ for i, tmpl in enumerate(l):
+ l[i] = re.sub(regex, repl, tmpl)
+
+
+class LoadTemplate(object):
+ '''
+ Load a template and its dependencies, generate a
+ configuration file which should be relatively easy and
+ straightforward to parse.
+ '''
+ edit_instructions = '''# Edit instructions:
+#
+# Add content only at the end of lines starting with '%%'.
+# Only add content, don't remove or replace anything.
+# The parameters following '%required' are not optional,
+# unlike those following '%optional'.
+# You may also add comments for future reference.'''
+ no_more_edit = '''# Don't edit anything below this line.'''
+
+ def __init__(self, name):
+ self.name = name
+ self.all_pre_gen = []
+ self.all_post_gen = []
+ self.all_pfx = []
+
+ def new_pfx(self, name):
+ i = 1
+ pfx = name
+ while pfx in self.all_pfx:
+ pfx = "%s_%d" % (name, i)
+ i += 1
+ self.all_pfx.append(pfx)
+ return pfx
+
+ def generate(self):
+ return '\n'.join(
+ ["# Configuration: %s" % self.name,
+ '',
+ self.edit_instructions,
+ '',
+ '\n'.join(self.all_pre_gen),
+ self.no_more_edit,
+ '',
+ '%generate',
+ '\n'.join(self.all_post_gen)])
+
+ def write_config(self, name):
+ try:
+ f = open("%s/%s" % (userdir.CRMCONF_DIR, name), "w")
+ except IOError as msg:
+ logger.error("open: %s", msg)
+ return False
+ print(self.generate(), file=f)
+ f.close()
+ return True
+
+ def load_template(self, tmpl):
+ try:
+ l = open(os.path.join(config.path.sharedir, 'templates', tmpl)).read().split('\n')
+ except IOError as msg:
+ logger.error("open: %s", msg)
+ return ''
+ if not validate_template(l):
+ return ''
+ logger.info("pulling in template %s", tmpl)
+ g = l.index('%generate')
+ pre_gen = l[0:g]
+ post_gen = l[g+1:]
+ name = get_var(pre_gen, '%name')
+ for s in l[0:g]:
+ if s.startswith('%depends_on'):
+ a = s.split()
+ if len(a) != 2:
+ logger.warning("%s: wrong usage", s)
+ continue
+ tmpl_id = a[1]
+ tmpl_pfx = self.load_template(a[1])
+ if tmpl_pfx:
+ fix_tmpl_refs(post_gen, '%'+tmpl_id, '%'+tmpl_pfx)
+ pfx = self.new_pfx(name)
+ fix_tmpl_refs(post_gen, '%_:', '%'+pfx+':')
+ # replace remaining %_, it may be useful at times
+ fix_tmpl_refs(post_gen, '%_', pfx)
+        # '%required' may be absent when only '%optional' is present, and
+        # list.index() raises ValueError rather than returning a false value
+        try:
+            v_idx = pre_gen.index('%required')
+        except ValueError:
+            v_idx = pre_gen.index('%optional')
+ pre_gen.insert(v_idx, '%pfx ' + pfx)
+ self.all_pre_gen += pre_gen
+ self.all_post_gen += post_gen
+ return pfx
+
+ def post_process(self, params):
+ pfx_re = '(%s)' % '|'.join(self.all_pfx)
+ for n in params:
+ fix_tmpl_refs(self.all_pre_gen, '%% '+n, "%% "+n+" "+params[n])
+ fix_tmpl_refs_re(self.all_post_gen,
+ '%' + pfx_re + '([^:]|$)', r'\1\2')
+ # process %if ... [%else] ... %fi
+ rmidx_l = []
+ if_seq = False
+ outcome = False # unnecessary, but to appease lints
+ for i in range(len(self.all_post_gen)):
+ s = self.all_post_gen[i]
+ if if_seq:
+ a = s.split()
+ if len(a) >= 1 and a[0] == '%fi':
+ if_seq = False
+ rmidx_l.append(i)
+ elif len(a) >= 1 and a[0] == '%else':
+ outcome = not outcome
+ rmidx_l.append(i)
+ else:
+ if not outcome:
+ rmidx_l.append(i)
+ continue
+ if not s:
+ continue
+ a = s.split()
+ if len(a) == 2 and a[0] == '%if':
+ outcome = not a[1].startswith('%') # not replaced -> false
+ if_seq = True
+ rmidx_l.append(i)
+ rmidx_l.reverse()
+ for i in rmidx_l:
+ del self.all_post_gen[i]
+
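+# A minimal usage sketch (illustrative; the template and configuration names
+# are placeholders):
+#
+#   lt = LoadTemplate('my-web-server')
+#   lt.load_template('virtual-ip')
+#   lt.post_process({})
+#   lt.write_config('my-web-server')
+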
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/term.py b/crmsh/term.py
new file mode 100644
index 0000000..1ac309d
--- /dev/null
+++ b/crmsh/term.py
@@ -0,0 +1,180 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+import sys
+import re
+# from: http://code.activestate.com/recipes/475116/
+
+# A module that can be used to portably generate formatted output to
+# a terminal.
+# Defines a set of instance variables whose
+# values are initialized to the control sequence necessary to
+# perform a given action. These can be simply included in normal
+# output to the terminal:
+# >>> print('This is ' + term.colors.GREEN + 'green' + term.colors.NORMAL)
+# Alternatively, the `render()` method can be used, which replaces
+# '${action}' with the string required to perform 'action':
+# >>> print(term.render('This is ${GREEN}green${NORMAL}'))
+# If the terminal doesn't support a given action, then the value of
+# the corresponding instance variable will be set to ''. As a
+# result, the above code will still work on terminals that do not
+# support color, except that their output will not be colored.
+# Also, this means that you can test whether the terminal supports a
+# given action by simply testing the truth value of the
+# corresponding instance variable:
+# >>> if term.colors.CLEAR_SCREEN:
+# ...     print('This terminal supports clearing the screen.')
+# Finally, if the width and height of the terminal are known, then
+# they will be stored in the `COLS` and `LINES` attributes.
+
+
+class colors(object):
+ # Cursor movement:
+ BOL = '' #: Move the cursor to the beginning of the line
+ UP = '' #: Move the cursor up one line
+ DOWN = '' #: Move the cursor down one line
+ LEFT = '' #: Move the cursor left one char
+ RIGHT = '' #: Move the cursor right one char
+ # Deletion:
+ CLEAR_SCREEN = '' #: Clear the screen and move to home position
+ CLEAR_EOL = '' #: Clear to the end of the line.
+ CLEAR_BOL = '' #: Clear to the beginning of the line.
+ CLEAR_EOS = '' #: Clear to the end of the screen
+ # Output modes:
+ BOLD = '' #: Turn on bold mode
+ BLINK = '' #: Turn on blink mode
+ DIM = '' #: Turn on half-bright mode
+ REVERSE = '' #: Turn on reverse-video mode
+ UNDERLINE = '' #: Turn on underline mode
+ NORMAL = '' #: Turn off all modes
+ # Cursor display:
+ HIDE_CURSOR = '' #: Make the cursor invisible
+ SHOW_CURSOR = '' #: Make the cursor visible
+ # Terminal size:
+ COLS = None #: Width of the terminal (None for unknown)
+ LINES = None #: Height of the terminal (None for unknown)
+ # Foreground colors:
+ BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''
+ # Background colors:
+ BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ''
+ BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ''
+ RLIGNOREBEGIN = '\001'
+ RLIGNOREEND = '\002'
+
+
+_STRING_CAPABILITIES = """
+BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1
+CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold
+BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0
+HIDE_CURSOR=cinvis SHOW_CURSOR=cnorm""".split()
+
+_COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split()
+_ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split()
+
+
+def _tigetstr(cap_name):
+ import curses
+ if not curses.tigetstr(cap_name):
+ return None
+ from .utils import to_ascii
+ cap = to_ascii(curses.tigetstr(cap_name)) or ''
+
+ # String capabilities can include "delays" of the form "$<2>".
+ # For any modern terminal, we should be able to just ignore
+ # these, so strip them out.
+    # terminfo(5) states that:
+ # A "/" suffix indicates that the padding is mandatory and forces a
+ # delay of the given number of milliseconds even on devices for which
+ # xon is present to indicate flow control.
+ # So let's respect that. But not the optional ones.
+ cap = re.sub(r'\$<\d+>[*]?', '', cap)
+
+ # To switch back to "NORMAL", we use sgr0, which resets "everything" to defaults.
+ # That on some terminals includes ending "alternate character set mode".
+ # Which is usually done by appending '\017'. Unfortunately, less -R
+ # does not eat that, but shows it as an annoying inverse '^O'
+ # Instead of falling back to less -r, which would have other implications as well,
+ # strip off that '\017': we don't use the alternative character set,
+ # so we won't need to reset it either.
+ if cap_name == 'sgr0':
+ cap = re.sub(r'\017$', '', cap)
+
+ return cap
+
+
+def _lookup_caps():
+ import curses
+ from .utils import to_ascii
+
+ # Look up numeric capabilities.
+ colors.COLS = curses.tigetnum('cols')
+ colors.LINES = curses.tigetnum('lines')
+ # Look up string capabilities.
+ for capability in _STRING_CAPABILITIES:
+ (attrib, cap_name) = capability.split('=')
+ setattr(colors, attrib, _tigetstr(cap_name) or '')
+ # Colors
+    set_fg = _tigetstr('setf')
+    if set_fg:
+        for i, color in enumerate(_COLORS):
+            setattr(colors, color, to_ascii(curses.tparm(set_fg.encode('utf-8'), i)) or '')
+    set_fg_ansi = _tigetstr('setaf')
+    if set_fg_ansi:
+        for i, color in enumerate(_ANSICOLORS):
+            setattr(colors, color, to_ascii(curses.tparm(set_fg_ansi.encode('utf-8'), i)) or '')
+    set_bg = _tigetstr('setb')
+    if set_bg:
+        for i, color in enumerate(_COLORS):
+            setattr(colors, 'BG_'+color, to_ascii(curses.tparm(set_bg.encode('utf-8'), i)) or '')
+    set_bg_ansi = _tigetstr('setab')
+    if set_bg_ansi:
+        for i, color in enumerate(_ANSICOLORS):
+            setattr(colors, 'BG_'+color, to_ascii(curses.tparm(set_bg_ansi.encode('utf-8'), i)) or '')
+
+
+def init():
+ """
+ Initialize attributes with appropriate values for the current terminal.
+
+ `_term_stream` is the stream that will be used for terminal
+ output; if this stream is not a tty, then the terminal is
+ assumed to be a dumb terminal (i.e., have no capabilities).
+ """
+ _term_stream = sys.stdout
+ # Curses isn't available on all platforms
+ try:
+ import curses
+ except ImportError:
+ sys.stderr.write("INFO: no curses support: you won't see colors\n")
+ return
+ # If the stream isn't a tty, then assume it has no capabilities.
+ from . import config
+ if not _term_stream.isatty() and 'color-always' not in config.color.style:
+ return
+ # Check the terminal type. If we fail, then assume that the
+ # terminal has no capabilities.
+ try:
+ curses.setupterm()
+ except curses.error:
+ return
+
+ _lookup_caps()
+
+
+def render(template):
+ """
+ Replace each $-substitutions in the given template string with
+ the corresponding terminal control string (if it's defined) or
+ '' (if it's not).
+ """
+ def render_sub(match):
+ s = match.group()
+ return getattr(colors, s[2:-1].upper(), '')
+ return re.sub(r'\${\w+}', render_sub, template)
+
+
+def is_color(s):
+ return hasattr(colors, s.upper())
+
+
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/tmpfiles.py b/crmsh/tmpfiles.py
new file mode 100644
index 0000000..3c1e608
--- /dev/null
+++ b/crmsh/tmpfiles.py
@@ -0,0 +1,70 @@
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+'''
+Files added to tmpfiles are removed at program exit.
+'''
+
+import os
+import shutil
+import atexit
+from tempfile import mkstemp, mkdtemp
+
+from . import utils
+
+_FILES = []
+_DIRS = []
+
+
+def _exit_handler():
+ "Called at program exit"
+ for f in _FILES:
+ try:
+ os.unlink(f)
+ except OSError:
+ pass
+ for d in _DIRS:
+ try:
+ shutil.rmtree(d)
+ except OSError:
+ pass
+
+
+def _mkdir(directory):
+ if not os.path.isdir(directory):
+ try:
+ os.makedirs(directory)
+ except OSError as err:
+ raise ValueError("Failed to create directory: %s" % (err))
+
+
+def add(filename):
+ '''
+ Remove the named file at program exit.
+ '''
+ if len(_FILES) + len(_DIRS) == 0:
+ atexit.register(_exit_handler)
+ _FILES.append(filename)
+
+
+def create(directory=utils.get_tempdir(), prefix='crmsh_'):
+ '''
+ Create a temporary file and remove it at program exit.
+ Returns (fd, filename)
+ '''
+ _mkdir(directory)
+ fd, fname = mkstemp(dir=directory, prefix=prefix)
+ add(fname)
+ return fd, fname
+
+
+def create_dir(directory=utils.get_tempdir(), prefix='crmsh_'):
+ '''
+ Create a temporary directory and remove it at program exit.
+ '''
+ _mkdir(directory)
+ ret = mkdtemp(dir=directory, prefix=prefix)
+ if len(_FILES) + len(_DIRS) == 0:
+ atexit.register(_exit_handler)
+ _DIRS.append(ret)
+ return ret
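+
+
+# A minimal usage sketch (illustrative):
+#
+#   fd, fname = create(prefix='crmsh_example_')
+#   os.write(fd, b'scratch data')
+#   os.close(fd)
+#   # fname is removed automatically at program exit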
diff --git a/crmsh/ui_assist.py b/crmsh/ui_assist.py
new file mode 100644
index 0000000..f5d1b68
--- /dev/null
+++ b/crmsh/ui_assist.py
@@ -0,0 +1,132 @@
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+from . import utils
+from . import command
+from . import completers as compl
+from . import xmlutil
+from .cibconfig import cib_factory
+
+
+def rmattrs(e, *attrs):
+ "remove the given attributes from an XML element"
+ for attr in attrs:
+ if attr in e.attrib:
+ del e.attrib[attr]
+
+
+class Assist(command.UI):
+ '''
+ The assist UI collects what could be called
+ configuration macros. Things like merging
+ multiple resources into a template, or building
+ a colocated set with a relation to a dummy
+ resource.
+ '''
+ name = "assist"
+
+ def __init__(self):
+ command.UI.__init__(self)
+
+ def requires(self):
+ return cib_factory.initialize()
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.call(cib_factory.prim_id_list))
+ def do_template(self, context, *primitives):
+ '''
+ Create a shared template for the given primitives
+ '''
+ if len(primitives) < 1:
+ context.fatal_error("Expected at least one primitive argument")
+ objs = [cib_factory.find_resource(p) for p in primitives]
+ for prim, obj in zip(primitives, objs):
+ if obj is None:
+ context.fatal_error("Primitive %s not found" % (prim))
+ if objs and all(obj.obj_type == 'primitive' for obj in objs):
+ return self._template_primitives(context, objs)
+ context.fatal_error("Cannot create a template for the given resources")
+
+ def _template_primitives(self, context, primitives):
+ """
+ Try to template the given primitives:
+ Templating means creating a rsc_template and moving
+ shared attributes and other commonalities into that template
+ (this second step is currently not available)
+ """
+ shared_template = None
+ if all('template' in obj.node.attrib for obj in primitives):
+ return True
+ if len(set(xmlutil.mk_rsc_type(obj.node) for obj in primitives)) != 1:
+ context.fatal_error("Cannot template the given primitives")
+
+ node = primitives[0].node
+ template_name = self.make_unique_name('template-%s-' % (node.get('type').lower()))
+ shared_template = cib_factory.create_object('rsc_template', template_name,
+ xmlutil.mk_rsc_type(node))
+ if not shared_template:
+ context.fatal_error("Error creating template")
+ for obj in primitives:
+ obj.node.set('template', template_name)
+ rmattrs(obj.node, 'class', 'provider', 'type')
+ obj.set_updated()
+
+ if not self._pull_attributes(context, shared_template, primitives):
+ context.fatal_error("Error when copying attributes into template")
+
+ context.info("Template created: %s" % (template_name))
+ return True
+
+ def _pull_attributes(self, context, template, primitives):
+ '''
+ TODO: take any attributes shared by all primitives and
+ move them into the shared template
+ '''
+ return True
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.call(cib_factory.prim_id_list))
+ @command.name('weak-bond')
+ @command.alias('weak_bond')
+ def do_weak_bond(self, context, *nodes):
+ '''
+ Create a 'weak' colocation:
+ Colocating a non-sequential resource set with
+ a dummy resource which is not monitored creates,
+ in effect, a colocation which does not imply any
+ internal relationship between resources.
+ '''
+ if len(nodes) < 2:
+ context.fatal_error("Need at least two arguments")
+
+ for node in nodes:
+ obj = cib_factory.find_resource(node)
+ if not obj:
+ context.fatal_error("Object not found: %s" % (node))
+ if not xmlutil.is_primitive(obj.node):
+ context.fatal_error("Object not primitive: %s" % (node))
+
+ constraint_name = self.make_unique_name('place-constraint-')
+ dummy_name = self.make_unique_name('place-dummy-')
+ print("Create weak bond / independent colocation")
+ print("The following elements will be created:")
+ print(" * Colocation constraint, ID: %s" % (constraint_name))
+ print(" * Dummy resource, ID: %s" % (dummy_name))
+ if not utils.can_ask() or utils.ask("Create resources?"):
+ cib_factory.create_object('primitive', dummy_name, 'ocf:heartbeat:Dummy')
+ colo = ['colocation', constraint_name, 'inf:', '(']
+ colo.extend(nodes)
+ colo.append(')')
+ colo.append(dummy_name)
+ cib_factory.create_object(*colo)
+
+ def make_unique_name(self, prefix):
+ n = 0
+ while n < 1000:
+ n += 1
+ name = "%s%s" % (prefix, n)
+            # accept the name only if it does not collide with an existing ID
+            if all(name != _id.lower() for _id in cib_factory.id_list()):
+                return name
+ raise ValueError("Failed to generate unique resource ID with prefix '%s'" % (prefix))
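+
+
+# Illustrative interactive usage (the resource IDs are placeholders):
+#
+#   crm(live)# assist weak-bond vip fs db
+#   crm(live)# assist template www-1 www-2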
diff --git a/crmsh/ui_cib.py b/crmsh/ui_cib.py
new file mode 100644
index 0000000..e33856b
--- /dev/null
+++ b/crmsh/ui_cib.py
@@ -0,0 +1,223 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import os
+import glob
+from . import command
+from . import xmlutil
+from . import utils
+from . import ui_cibstatus
+from . import constants
+from . import config
+from . import options
+from .cibstatus import cib_status
+from .cibconfig import cib_factory
+from .sh import ShellUtils
+from . import tmpfiles
+from . import completers as compl
+from . import log
+
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+_NEWARGS = ('force', '--force', 'withstatus', 'empty')
+
+
+class CibShadow(command.UI):
+ '''
+ CIB shadow management class
+ '''
+ name = "cib"
+ extcmd = ">/dev/null </dev/null crm_shadow -b"
+ extcmd_stdout = "</dev/null crm_shadow -b"
+
+ def requires(self):
+ if not utils.is_program('crm_shadow'):
+ logger_utils.no_prog_err('crm_shadow')
+ return False
+ return True
+
+ @command.level(ui_cibstatus.CibStatusUI)
+ def do_cibstatus(self):
+ pass
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.null, compl.choice(_NEWARGS))
+ def do_new(self, context, *args):
+ "usage: new [<shadow_cib>] [withstatus] [force] [empty]"
+ argl = list(args)
+ opt_l = utils.fetch_opts(argl, ["force", "--force", "withstatus", "empty"])
+ if len(argl) > 1:
+ context.fatal_error("Unexpected argument(s): " + ' '.join(argl))
+
+ name = None
+ if argl:
+ name = argl[0]
+ if not utils.is_filename_sane(name):
+ context.fatal_error("Bad filename: " + name)
+ if name in (constants.tmp_cib_prompt, constants.live_cib_prompt):
+ context.fatal_error("Shadow name '%s' is not allowed" % (name))
+ del argl[0]
+ constants.tmp_cib = False
+ else:
+ fd, fname = tmpfiles.create(directory=xmlutil.cib_shadow_dir(), prefix="shadow.crmsh_")
+ name = os.path.basename(fname).replace("shadow.", "")
+ constants.tmp_cib = True
+
+ if "empty" in opt_l:
+ new_cmd = "%s -e '%s'" % (self.extcmd, name)
+ else:
+ new_cmd = "%s -c '%s'" % (self.extcmd, name)
+ if constants.tmp_cib or config.core.force or "force" in opt_l or "--force" in opt_l:
+ new_cmd = "%s --force" % new_cmd
+ if utils.ext_cmd(new_cmd) == 0:
+ context.info("%s shadow CIB created" % name)
+ self.do_use(context, name)
+ if "withstatus" in opt_l:
+ cib_status.load("shadow:%s" % name)
+
+ def _find_pe(self, context, infile):
+ 'Find a pe input'
+ for p in ("%s/%s", "%s/%s.bz2", "%s/pe-*-%s.bz2"):
+ fl = glob.glob(p % (config.path.pe_state_dir, infile))
+ if fl:
+ break
+ if not fl:
+ context.fatal_error("no %s pe input file" % infile)
+ if len(fl) > 1:
+ context.fatal_error("more than one %s pe input file: %s" %
+ (infile, ' '.join(fl)))
+ if not fl[0]:
+ context.fatal_error("bad %s pe input file" % infile)
+ return fl[0]
+
+ @command.skill_level('administrator')
+ @command.completers(compl.null, compl.shadows)
+ def do_import(self, context, infile, name=None):
+ "usage: import {<file>|<number>} [<shadow>]"
+ if name and not utils.is_filename_sane(name):
+ context.fatal_error("Bad filename: " + name)
+ # where's the input?
+ if not os.access(infile, os.F_OK):
+ if "/" in infile:
+ context.fatal_error(str(infile) + ": no such file")
+ infile = self._find_pe(context, infile)
+ if not name:
+ name = os.path.basename(infile).replace(".bz2", "")
+ if not xmlutil.pe2shadow(infile, name):
+ context.fatal_error("Error copying PE file to shadow: %s -> %s" % (infile, name))
+ # use the shadow and load the status from there
+ return self.do_use(context, name, "withstatus")
+
+ @command.skill_level('administrator')
+ @command.completers(compl.shadows)
+ def do_delete(self, context, name):
+ "usage: delete <shadow_cib>"
+ if not utils.is_filename_sane(name):
+ context.fatal_error("Bad filename: " + name)
+ if utils.get_cib_in_use() == name:
+ context.fatal_error("%s shadow CIB is in use" % name)
+ if utils.ext_cmd("%s -D '%s' --force" % (self.extcmd, name)) == 0:
+ context.info("%s shadow CIB deleted" % name)
+ else:
+ context.fatal_error("failed to delete %s shadow CIB" % name)
+
+ @command.skill_level('administrator')
+ @command.completers(compl.shadows)
+ def do_reset(self, context, name):
+ "usage: reset <shadow_cib>"
+ if not utils.is_filename_sane(name):
+ context.fatal_error("Bad filename: " + name)
+ if utils.ext_cmd("%s -r '%s'" % (self.extcmd, name)) == 0:
+ context.info("copied live CIB to %s" % name)
+ else:
+ context.fatal_error("failed to copy live CIB to %s" % name)
+
+ @command.skill_level('administrator')
+ @command.wait
+ @command.completers(compl.shadows)
+ def do_commit(self, context, name=None):
+ "usage: commit [<shadow_cib>]"
+ if name and not utils.is_filename_sane(name):
+ context.fatal_error("Bad filename: " + name)
+ if not name:
+ name = utils.get_cib_in_use()
+ if not name:
+ context.fatal_error("There is nothing to commit")
+ if utils.ext_cmd("%s -C '%s' --force" % (self.extcmd, name)) == 0:
+ context.info("committed '%s' shadow CIB to the cluster" % name)
+ else:
+ context.fatal_error("failed to commit the %s shadow CIB" % name)
+ if constants.tmp_cib:
+ self._use('', '')
+
+ @command.skill_level('administrator')
+ def do_diff(self, context):
+ "usage: diff"
+ rc, s = ShellUtils().get_stdout(utils.add_sudo("%s -d" % self.extcmd_stdout))
+ utils.page_string(s)
+
+ @command.skill_level('administrator')
+ def do_list(self, context):
+ "usage: list"
+ if options.regression_tests:
+ for t in xmlutil.listshadows():
+ print(t)
+ else:
+ utils.multicolumn(xmlutil.listshadows())
+
+ def _use(self, name, withstatus):
+ # Choose a shadow cib for further changes. If the name
+ # provided is empty, then choose the live (cluster) cib.
+ # Don't allow ' in shadow names
+ if not name or name == "live":
+ if withstatus:
+ cib_status.load("live")
+ if constants.tmp_cib:
+ utils.ext_cmd("%s -D '%s' --force" % (self.extcmd, utils.get_cib_in_use()))
+ constants.tmp_cib = False
+ utils.clear_cib_in_use()
+ else:
+ utils.set_cib_in_use(name)
+ if withstatus:
+ cib_status.load("shadow:%s" % name)
+ return True
+
+ @command.skill_level('administrator')
+ @command.completers(compl.join(compl.shadows, compl.choice(['live'])),
+ compl.choice(['withstatus']))
+ def do_use(self, context, name='', withstatus=''):
+ "usage: use [<shadow_cib>] [withstatus]"
+ # check the name argument
+ if name and not utils.is_filename_sane(name):
+ context.fatal_error("Bad filename: " + name)
+ if name and name != "live":
+ if not os.access(xmlutil.shadowfile(name), os.F_OK):
+ context.fatal_error("%s: no such shadow CIB" % name)
+ if withstatus and withstatus != "withstatus":
+ context.fatal_error("Expected 'withstatus', got '%s'" % (withstatus))
+ # If invoked from configure
+ # take special precautions
+ if not context.previous_level_is("cibconfig"):
+ return self._use(name, withstatus)
+ if not cib_factory.has_cib_changed():
+ ret = self._use(name, withstatus)
+ # new CIB: refresh the CIB factory
+ cib_factory.refresh()
+ return ret
+ saved_cib = utils.get_cib_in_use()
+ self._use(name, '') # don't load the status yet
+ if not cib_factory.is_current_cib_equal(silent=True):
+ # user made changes and now wants to switch to a
+ # different and unequal CIB; we refuse to cooperate
+ context.error_message("the requested CIB is different from the current one")
+ if config.core.force:
+ context.info("CIB overwrite forced")
+ elif not utils.ask("All changes will be dropped. Do you want to proceed?"):
+ self._use(saved_cib, '') # revert to the previous CIB
+ return False
+ return self._use(name, withstatus) # now load the status too
+
+
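+# An illustrative shadow CIB workflow driven by the commands above (the shadow
+# name is a placeholder):
+#
+#   crm(live)# cib new test-cfg
+#   crm(test-cfg)# cib diff
+#   crm(test-cfg)# cib commit test-cfg
+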
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/ui_cibstatus.py b/crmsh/ui_cibstatus.py
new file mode 100644
index 0000000..e725464
--- /dev/null
+++ b/crmsh/ui_cibstatus.py
@@ -0,0 +1,100 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+from . import command
+from . import completers as compl
+from . import utils
+from . import ui_utils
+from . import constants
+from .cibstatus import cib_status
+
+
+_status_node_list = compl.call(cib_status.status_node_list)
+
+
+class CibStatusUI(command.UI):
+ '''
+ The CIB status section management user interface class
+ '''
+ name = "cibstatus"
+
+ @command.skill_level('expert')
+ def do_load(self, context, org):
+ "usage: load {<file>|shadow:<cib>|live}"
+ return cib_status.load(org)
+
+ @command.skill_level('expert')
+ def do_save(self, context, dest=None):
+ "usage: save [<file>|shadow:<cib>]"
+ return cib_status.save(dest)
+
+ @command.skill_level('administrator')
+ def do_origin(self, context):
+ "usage: origin"
+ state = cib_status.modified and " (modified)" or ""
+ print("%s%s" % (cib_status.origin, state))
+
+ @command.skill_level('administrator')
+ @command.completers(compl.choice(['changed']))
+ def do_show(self, context, changed=""):
+ "usage: show [changed]"
+ if changed:
+ if changed != "changed":
+ context.fatal_error("Expected 'changed', got '%s'" % (changed))
+ return cib_status.list_changes()
+ return cib_status.show()
+
+ @command.skill_level('administrator')
+ @command.completers(compl.booleans)
+ def do_quorum(self, context, opt):
+ "usage: quorum <bool>"
+ if not utils.verify_boolean(opt):
+ context.fatal_error("%s: bad boolean option" % opt)
+ return cib_status.set_quorum(utils.is_boolean_true(opt))
+
+ @command.skill_level('expert')
+ @command.completers(_status_node_list, compl.choice(constants.node_states))
+ def do_node(self, context, node, state):
+ "usage: node <node> {online|offline|unclean}"
+ return cib_status.edit_node(node, state)
+
+ @command.skill_level('expert')
+ @command.completers(compl.null, compl.choice(list(cib_status.ticket_ops.keys())))
+ def do_ticket(self, context, ticket, subcmd):
+ "usage: ticket <ticket> {grant|revoke|activate|standby}"
+ return cib_status.edit_ticket(ticket, subcmd)
+
+ @command.skill_level('expert')
+ @command.completers(compl.choice(constants.ra_operations),
+ compl.call(cib_status.status_rsc_list),
+ compl.choice(list(constants.lrm_exit_codes.keys())),
+ compl.choice(list(constants.lrm_status_codes.keys())),
+ compl.choice(constants.node_states))
+ def do_op(self, context, op, rsc, rc, op_status=None, node=''):
+ "usage: op <operation> <resource> <exit_code> [<op_status>] [<node>]"
+ if rc in constants.lrm_exit_codes:
+ num_rc = constants.lrm_exit_codes[rc]
+ else:
+ num_rc = rc
+ if not num_rc.isdigit():
+ context.fatal_error("Invalid exit code '%s'" % num_rc)
+ num_op_status = op_status
+ if op_status:
+ if op_status in constants.lrm_status_codes:
+ num_op_status = constants.lrm_status_codes[op_status]
+ if not num_op_status.isdigit():
+ context.fatal_error("Invalid operation status '%s'" % num_op_status)
+ return cib_status.edit_op(op, rsc, num_rc, num_op_status, node)
+
+ @command.skill_level('administrator')
+ @command.completers(compl.choice(['nograph']))
+ def do_run(self, context, *args):
+ "usage: run [nograph] [v...] [scores] [utilization]"
+ return ui_utils.ptestlike(cib_status.run, '', context.get_command_name(), args)
+
+ @command.skill_level('administrator')
+ @command.completers(compl.choice(['nograph']))
+ def do_simulate(self, context, *args):
+ "usage: simulate [nograph] [v...] [scores] [utilization]"
+ return ui_utils.ptestlike(cib_status.simulate, '', context.get_command_name(), args)
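+
+
+# Illustrative interactive usage (the shadow CIB and node names are placeholders):
+#
+#   crm(live)cibstatus# load shadow:test-cfg
+#   crm(live)cibstatus# node node1 offline
+#   crm(live)cibstatus# run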
diff --git a/crmsh/ui_cluster.py b/crmsh/ui_cluster.py
new file mode 100644
index 0000000..bff5b57
--- /dev/null
+++ b/crmsh/ui_cluster.py
@@ -0,0 +1,895 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import sys
+import re
+import argparse
+import typing
+from argparse import ArgumentParser, RawDescriptionHelpFormatter
+
+import crmsh.parallax
+from . import command, sh
+from . import utils
+from . import scripts
+from . import completers as compl
+from . import bootstrap
+from . import corosync
+from . import qdevice
+from .cibconfig import cib_factory
+from .prun import prun
+from .service_manager import ServiceManager
+from .sh import ShellUtils
+from .ui_node import parse_option_for_nodes
+from . import constants
+
+
+from . import log
+logger = log.setup_logger(__name__)
+
+
+def parse_options(parser, args):
+ try:
+ options, args = parser.parse_known_args(list(args))
+ except:
+ return None, None
+ if hasattr(options, 'help') and options.help:
+ parser.print_help()
+ return None, None
+ utils.check_empty_option_value(options)
+ return options, args
+
+
+def _remove_completer(args):
+ try:
+ n = utils.list_cluster_nodes()
+ except:
+ n = []
+ for node in args[1:]:
+ if node in n:
+ n.remove(node)
+ return scripts.param_completion_list('remove') + n
+
+
+def script_printer():
+ from .ui_script import ConsolePrinter
+ return ConsolePrinter()
+
+
+def script_args(args):
+ from .ui_script import _nvpairs2parameters
+ return _nvpairs2parameters(args)
+
+
+def get_cluster_name():
+ cluster_name = None
+ if not ServiceManager(sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).service_is_active("corosync.service"):
+ name = corosync.get_values('totem.cluster_name')
+ if name:
+ cluster_name = name[0]
+ else:
+ cluster_name = cib_factory.get_property('cluster-name')
+ return cluster_name
+
+
+class ArgparseCustomizableAction(argparse.Action):
+ def __call__(self, parser, namespace, value, option_string=None):
+ previous_value = getattr(namespace, self.dest, None)
+        parsed_value = self.parse(parser, previous_value, value, option_string)
+ self.validate(parser, parsed_value, option_string)
+ setattr(namespace, self.dest, parsed_value)
+
+ def parse(self, parser, previous_value, raw_value, option_string):
+ """Parse one argument and return the parsed value.
+
+ Arguments:
+            previous_value: The previous value held in the destination attribute.
+            raw_value: The string value to be parsed.
+ option_string: The command-line option string associated with this action.
+ """
+ raise NotImplementedError
+
+ def validate(self, parser, parsed_value, option_string):
+ pass
+
+
+class ArgparseActionSplitAndAppendParseMixin(ArgparseCustomizableAction):
+ """Parse `--foo a;b --foo "c d" --foo e` into ['a', 'b', 'c', 'd', 'e']"""
+ def parse(self, parser, previous_value, raw_value, option_string):
+ items = previous_value if previous_value is not None else []
+ items.extend([x for x in re.split("[; ]", raw_value) if x])
+ return items
+
+
+class ArgparseActionUniqueListItemValidateMixin(ArgparseCustomizableAction):
+ """Validate te uniqueness of parsed list items."""
+ def validate(self, parser, parsed_value, option_string):
+ if len(parsed_value) != len(set(parsed_value)):
+ parser.error(f"Duplicated input for '{'/'.join(self.option_strings)}' option")
+
+
+class CustomAppendAction(ArgparseActionSplitAndAppendParseMixin, ArgparseActionUniqueListItemValidateMixin):
+ """
+ Custom class for argparse append action:
+ - Flatten the value like '-s "/dev/sda1;/dev/sda2"'
+ - Detect duplicated input
+ """
+ pass
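+
+# A minimal sketch of CustomAppendAction attached to a bare parser (the option
+# name -s and the device paths are illustrative):
+#
+#     p = argparse.ArgumentParser()
+#     p.add_argument("-s", dest="devices", action=CustomAppendAction, default=[])
+#     p.parse_args(["-s", "/dev/sda1;/dev/sda2", "-s", "/dev/sdb1"]).devices
+#     # -> ['/dev/sda1', '/dev/sda2', '/dev/sdb1']
+#     p.parse_args(["-s", "/dev/sda1", "-s", "/dev/sda1"])
+#     # -> exits via parser.error("Duplicated input for '-s' option")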
+
+
+class ArgparseActionUniqueHostInListItemValidateMixin(ArgparseCustomizableAction):
+ """Validate the uniqueness of hosts in a parsed list in the format of 'user@host'"""
+ def validate(self, parser, parsed_value, option_string):
+ known_hosts = set()
+ for item in parsed_value:
+ match = re.match("^(?:[^@]+@)?([^@]+)$", item)
+ if match is None:
+ parser.error("Malformed value for option {} [<user>@]<host>: {}.".format(
+ '/'.join(self.option_strings), parsed_value
+ ))
+ host = match.group(1)
+ if host in known_hosts:
+ parser.error("Duplicated host in option {}: {}".format(
+ '/'.join(self.option_strings), parsed_value
+ ))
+ known_hosts.add(host)
+
+
+class ArgparseUserAtHostAppendAction(
+ ArgparseActionSplitAndAppendParseMixin,
+ ArgparseActionUniqueHostInListItemValidateMixin,
+):
+ pass
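+
+# A sketch of the [<user>@]<host> matching used by the validator above (the
+# hosts are illustrative):
+#
+#     re.match("^(?:[^@]+@)?([^@]+)$", "alice@node1").group(1)  # -> 'node1'
+#     re.match("^(?:[^@]+@)?([^@]+)$", "node2").group(1)        # -> 'node2'
+#
+# "alice@node1" and "bob@node1" resolve to the same host, so passing both to
+# an option using this action is rejected as a duplicate.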
+
+
+class Cluster(command.UI):
+ '''
+ Whole cluster management.
+
+ - Package installation
+ - System configuration
+ - Network troubleshooting
+ - Perform other callouts/cluster-wide devops operations
+ '''
+ name = "cluster"
+
+ def requires(self):
+ return True
+
+ def __init__(self):
+ command.UI.__init__(self)
+ # ugly hack to allow overriding the node list
+ # for the cluster commands that operate before
+ # there is an actual cluster
+ self._inventory_nodes = None
+ self._inventory_target = None
+
+ @command.skill_level('administrator')
+ def do_start(self, context, *args):
+ '''
+ Starts the cluster stack on all nodes or specific node(s)
+ '''
+ service_check_list = ["pacemaker.service"]
+ start_qdevice = False
+ if utils.is_qdevice_configured():
+ start_qdevice = True
+ service_check_list.append("corosync-qdevice.service")
+
+ service_manager = ServiceManager()
+ node_list = parse_option_for_nodes(context, *args)
+ for node in node_list[:]:
+ if all([service_manager.service_is_active(srv, remote_addr=node) for srv in service_check_list]):
+ logger.info("The cluster stack already started on {}".format(node))
+ node_list.remove(node)
+ if not node_list:
+ return
+
+ if start_qdevice:
+ service_manager.start_service("corosync-qdevice", node_list=node_list)
+ node_list = bootstrap.start_pacemaker(node_list)
+ if start_qdevice:
+ qdevice.QDevice.check_qdevice_vote()
+ for node in node_list:
+ logger.info("The cluster stack started on {}".format(node))
+
+ @staticmethod
+ def _node_ready_to_stop_cluster_service(node):
+ """
+        Check whether the given node is ready to stop the cluster service
+
+        If both corosync.service and pacemaker.service are active, return True
+        If only some of the services are running, stop them first and return False
+ """
+ service_manager = ServiceManager()
+
+ corosync_active = service_manager.service_is_active("corosync.service", remote_addr=node)
+ sbd_active = service_manager.service_is_active("sbd.service", remote_addr=node)
+ pacemaker_active = service_manager.service_is_active("pacemaker.service", remote_addr=node)
+
+ if not corosync_active:
+ if sbd_active:
+ service_manager.stop_service("corosync", remote_addr=node)
+ logger.info(f"The cluster stack stopped on {node}")
+ else:
+ logger.info(f"The cluster stack already stopped on {node}")
+ return False
+
+ elif not pacemaker_active:
+ service_manager.stop_service("corosync", remote_addr=node)
+ logger.info("The cluster stack stopped on {}".format(node))
+ return False
+
+ return True
+
+ @staticmethod
+ def _wait_for_dc(node=None):
+ """
+ Wait for the cluster's DC to become available
+ """
+ if not ServiceManager().service_is_active("pacemaker.service", remote_addr=node):
+ return
+
+ dc_deadtime = utils.get_property("dc-deadtime", peer=node) or str(constants.DC_DEADTIME_DEFAULT)
+ dc_timeout = int(dc_deadtime.strip('s')) + 5
+ try:
+ utils.check_function_with_timeout(utils.get_dc, wait_timeout=dc_timeout, peer=node)
+ except TimeoutError:
+ logger.error("No DC found currently, please wait if the cluster is still starting")
+ raise utils.TerminateSubCommand
+
+ @staticmethod
+ def _set_dlm(node=None):
+ """
+        When DLM is running and quorum is lost, set enable_quorum_fencing=0 and
+        enable_quorum_lockspace=0 in the DLM configuration before stopping the cluster service
+ """
+ if utils.is_dlm_running(node) and not utils.is_quorate(node):
+ logger.debug("Quorum is lost; Set enable_quorum_fencing=0 and enable_quorum_lockspace=0 for dlm")
+ utils.set_dlm_option(peer=node, enable_quorum_fencing=0, enable_quorum_lockspace=0)
+
+ @command.skill_level('administrator')
+ def do_stop(self, context, *args):
+ '''
+ Stops the cluster stack on all nodes or specific node(s)
+ '''
+ node_list = parse_option_for_nodes(context, *args)
+ node_list = [n for n in node_list if self._node_ready_to_stop_cluster_service(n)]
+ if not node_list:
+ return
+ logger.debug(f"stop node list: {node_list}")
+
+ self._wait_for_dc(node_list[0])
+
+ self._set_dlm(node_list[0])
+
+ service_manager = ServiceManager()
+        # Stop pacemaker first, so the cluster keeps quorum until corosync is stopped
+ node_list = service_manager.stop_service("pacemaker", node_list=node_list)
+        # Then stop qdevice if it is active
+ if service_manager.service_is_active("corosync-qdevice.service"):
+ service_manager.stop_service("corosync-qdevice.service", node_list=node_list)
+ # Last, stop corosync
+ node_list = service_manager.stop_service("corosync", node_list=node_list)
+
+ for node in node_list:
+ logger.info("The cluster stack stopped on {}".format(node))
+
+ @command.skill_level('administrator')
+ def do_restart(self, context, *args):
+ '''
+ Restarts the cluster stack on all nodes or specific node(s)
+ '''
+ parse_option_for_nodes(context, *args)
+ self.do_stop(context, *args)
+ self.do_start(context, *args)
+
+ @command.skill_level('administrator')
+ def do_enable(self, context, *args):
+ '''
+ Enable the cluster services on this node
+ '''
+ node_list = parse_option_for_nodes(context, *args)
+ service_manager = ServiceManager()
+ node_list = service_manager.enable_service("pacemaker.service", node_list=node_list)
+ if service_manager.service_is_available("corosync-qdevice.service") and utils.is_qdevice_configured():
+ service_manager.enable_service("corosync-qdevice.service", node_list=node_list)
+ for node in node_list:
+ logger.info("Cluster services enabled on %s", node)
+
+ @command.skill_level('administrator')
+ def do_disable(self, context, *args):
+ '''
+ Disable the cluster services on this node
+ '''
+ node_list = parse_option_for_nodes(context, *args)
+ service_manager = ServiceManager()
+ node_list = service_manager.disable_service("pacemaker.service", node_list=node_list)
+ service_manager.disable_service("corosync-qdevice.service", node_list=node_list)
+ for node in node_list:
+ logger.info("Cluster services disabled on %s", node)
+
+ def _args_implicit(self, context, args, name):
+ '''
+ handle early non-nvpair arguments as
+ values in an implicit list
+ '''
+ args = list(args)
+ vals = []
+ while args and args[0].find('=') == -1:
+ vals.append(args[0])
+ args = args[1:]
+ if vals:
+ return args + ['%s=%s' % (name, ','.join(vals))]
+ return args
+
+ # @command.completers_repeating(compl.call(scripts.param_completion_list, 'init'))
+ @command.skill_level('administrator')
+ def do_init(self, context, *args):
+ '''
+ Initialize a cluster.
+ '''
+ def looks_like_hostnames(lst):
+ sectionlist = bootstrap.INIT_STAGES
+ return all(not (l.startswith('-') or l in sectionlist) for l in lst)
+ if len(args) > 0:
+ if '--dry-run' in args or looks_like_hostnames(args):
+ args = ['--yes', '--nodes'] + [arg for arg in args if arg != '--dry-run']
+ parser = ArgumentParser(description="""
+Initialize a cluster from scratch. This command configures
+a complete cluster, and can also add additional cluster
+nodes to the initial one-node cluster using the --nodes
+option.""", usage="init [options] [STAGE]", epilog="""
+
+Stage can be one of:
+ ssh Create SSH keys for passwordless SSH between cluster nodes
+ csync2 Configure csync2
+ corosync Configure corosync
+ sbd Configure SBD (requires -s <dev>)
+ cluster Bring the cluster online
+ ocfs2 Configure OCFS2 (requires -o <dev>) NOTE: this is a Technical Preview
+ vgfs Create volume group and filesystem (ocfs2 template only,
+             requires -o <dev>) NOTE: this stage is an alias of the ocfs2 stage
+ admin Create administration virtual IP (optional)
+ qdevice Configure qdevice and qnetd
+
+Note:
+ - If stage is not specified, the script will run through each stage
+ in sequence, with prompts for required information.
+
+Examples:
+ # Setup the cluster on the current node
+ crm cluster init -y
+
+ # Setup the cluster with multiple nodes
+  (NOTE: the current node becomes part of the cluster even if it is not listed in the -N option, as below)
+ crm cluster init -N node1 -N node2 -N node3 -y
+
+ # Setup the cluster on the current node, with two network interfaces
+ crm cluster init -i eth1 -i eth2 -y
+
+  # Setup the cluster on the current node, with disk-based SBD
+  crm cluster init -s <shared disk> -y
+
+ # Setup the cluster on the current node, with diskless SBD
+ crm cluster init -S -y
+
+ # Setup the cluster on the current node, with QDevice
+ crm cluster init --qnetd-hostname <qnetd addr> -y
+
+  # Setup the cluster on the current node, with SBD+OCFS2
+  crm cluster init -s <shared disk1> -o <shared disk2> -y
+
+  # Setup the cluster on the current node, with SBD+OCFS2+Cluster LVM
+  crm cluster init -s <shared disk1> -o <shared disk2> -o <shared disk3> -C -y
+
+  # Add SBD on a running cluster
+  crm cluster init sbd -s <shared disk> -y
+
+  # Replace the SBD device on a running cluster that already has SBD configured
+  crm -F cluster init sbd -s <shared disk> -y
+
+ # Add diskless SBD on a running cluster
+ crm cluster init sbd -S -y
+
+ # Add QDevice on a running cluster
+ crm cluster init qdevice --qnetd-hostname <qnetd addr> -y
+
+ # Add OCFS2+Cluster LVM on a running cluster
+  crm cluster init ocfs2 -o <shared disk1> -o <shared disk2> -C -y
+""", add_help=False, formatter_class=RawDescriptionHelpFormatter)
+
+ parser.add_argument("-h", "--help", action="store_true", dest="help", help="Show this help message")
+ parser.add_argument("-q", "--quiet", action="store_true", dest="quiet",
+ help="Be quiet (don't describe what's happening, just do it)")
+ parser.add_argument("-y", "--yes", action="store_true", dest="yes_to_all",
+ help='Answer "yes" to all prompts (use with caution, this is destructive, especially those storage related configurations and stages.)')
+ parser.add_argument("-n", "--name", metavar="NAME", dest="cluster_name", default="hacluster",
+ help='Set the name of the configured cluster.')
+ parser.add_argument("-N", "--node", metavar="[USER@]HOST", dest="user_at_node_list", action=ArgparseUserAtHostAppendAction, default=[],
+                        help='A member node of the cluster. Note: the current node is always initialized at the beginning of bootstrap.')
+ parser.add_argument("-S", "--enable-sbd", dest="diskless_sbd", action="store_true",
+ help="Enable SBD even if no SBD device is configured (diskless mode)")
+ parser.add_argument("-w", "--watchdog", dest="watchdog", metavar="WATCHDOG",
+ help="Use the given watchdog device or driver name")
+ parser.add_argument("-x", "--skip-csync2-sync", dest="skip_csync2", action="store_true",
+ help="Skip csync2 initialization (an experimental option)")
+ parser.add_argument("--no-overwrite-sshkey", action="store_true", dest="no_overwrite_sshkey",
+ help='Avoid "/root/.ssh/id_rsa" overwrite if "-y" option is used (False by default; Deprecated)')
+ parser.add_argument('--use-ssh-agent', action='store_true', dest='use_ssh_agent',
+ help="Use an existing key from ssh-agent instead of creating new key pairs")
+
+ network_group = parser.add_argument_group("Network configuration", "Options for configuring the network and messaging layer.")
+ network_group.add_argument("-i", "--interface", dest="nic_list", metavar="IF", action=CustomAppendAction, choices=utils.interface_choice(), default=[],
+ help="Bind to IP address on interface IF. Use -i second time for second interface")
+ network_group.add_argument("-u", "--unicast", action="store_true", dest="unicast",
+ help="Configure corosync to communicate over unicast(udpu). This is the default transport type")
+ network_group.add_argument("-U", "--multicast", action="store_true", dest="multicast",
+ help="Configure corosync to communicate over multicast. Default is unicast")
+ network_group.add_argument("-A", "--admin-ip", dest="admin_ip", metavar="IP",
+ help="Configure IP address as an administration virtual IP")
+ network_group.add_argument("-M", "--multi-heartbeats", action="store_true", dest="second_heartbeat",
+ help="Configure corosync with second heartbeat line")
+ network_group.add_argument("-I", "--ipv6", action="store_true", dest="ipv6",
+ help="Configure corosync use IPv6")
+
+ qdevice_group = parser.add_argument_group("QDevice configuration", re.sub(' ', '', constants.QDEVICE_HELP_INFO) + "\n\nOptions for configuring QDevice and QNetd.")
+ qdevice_group.add_argument("--qnetd-hostname", dest="qnetd_addr", metavar="[USER@]HOST",
+ help="User and host of the QNetd server. The host can be specified in either hostname or IP address.")
+ qdevice_group.add_argument("--qdevice-port", dest="qdevice_port", metavar="PORT", type=int, default=5403,
+ help="TCP PORT of QNetd server (default:5403)")
+ qdevice_group.add_argument("--qdevice-algo", dest="qdevice_algo", metavar="ALGORITHM", default="ffsplit", choices=['ffsplit', 'lms'],
+ help="QNetd decision ALGORITHM (ffsplit/lms, default:ffsplit)")
+ qdevice_group.add_argument("--qdevice-tie-breaker", dest="qdevice_tie_breaker", metavar="TIE_BREAKER", default="lowest",
+ help="QNetd TIE_BREAKER (lowest/highest/valid_node_id, default:lowest)")
+ qdevice_group.add_argument("--qdevice-tls", dest="qdevice_tls", metavar="TLS", default="on", choices=['on', 'off', 'required'],
+ help="Whether using TLS on QDevice/QNetd (on/off/required, default:on)")
+ qdevice_group.add_argument("--qdevice-heuristics", dest="qdevice_heuristics", metavar="COMMAND",
+ help="COMMAND to run with absolute path. For multiple commands, use \";\" to separate (details about heuristics can see man 8 corosync-qdevice)")
+ qdevice_group.add_argument("--qdevice-heuristics-mode", dest="qdevice_heuristics_mode", metavar="MODE", choices=['on', 'sync', 'off'],
+ help="MODE of operation of heuristics (on/sync/off, default:sync)")
+
+ storage_group = parser.add_argument_group("Storage configuration", "Options for configuring shared storage.")
+ storage_group.add_argument("-s", "--sbd-device", dest="sbd_devices", metavar="DEVICE", action=CustomAppendAction, default=[],
+ help="Block device to use for SBD fencing, use \";\" as separator or -s multiple times for multi path (up to 3 devices)")
+ storage_group.add_argument("-o", "--ocfs2-device", dest="ocfs2_devices", metavar="DEVICE", action=CustomAppendAction, default=[],
+ help="Block device to use for OCFS2; When using Cluster LVM2 to manage the shared storage, user can specify one or multiple raw disks, use \";\" as separator or -o multiple times for multi path (must specify -C option) NOTE: this is a Technical Preview")
+ storage_group.add_argument("-C", "--cluster-lvm2", action="store_true", dest="use_cluster_lvm2",
+ help="Use Cluster LVM2 (only valid together with -o option) NOTE: this is a Technical Preview")
+ storage_group.add_argument("-m", "--mount-point", dest="mount_point", metavar="MOUNT", default="/srv/clusterfs",
+ help="Mount point for OCFS2 device (default is /srv/clusterfs, only valid together with -o option) NOTE: this is a Technical Preview")
+
+ options, args = parse_options(parser, args)
+ if options is None or args is None:
+ return
+
+ stage = ""
+ if len(args):
+ stage = args[0]
+ if stage == "vgfs":
+ stage = "ocfs2"
+ logger.warning("vgfs stage was deprecated and is an alias of ocfs2 stage now")
+ if stage not in bootstrap.INIT_STAGES and stage != "":
+ parser.error("Invalid stage (%s)" % (stage))
+
+ if options.qnetd_addr:
+ if not ServiceManager().service_is_available("corosync-qdevice.service"):
+ utils.fatal("corosync-qdevice.service is not available")
+ if options.qdevice_heuristics_mode and not options.qdevice_heuristics:
+ parser.error("Option --qdevice-heuristics is required if want to configure heuristics mode")
+ options.qdevice_heuristics_mode = options.qdevice_heuristics_mode or "sync"
+ elif re.search("--qdevice-.*", ' '.join(sys.argv)) or (stage == "qdevice" and options.yes_to_all):
+ parser.error("Option --qnetd-hostname is required if want to configure qdevice")
+
+ # if options.geo and options.name == "hacluster":
+ # parser.error("For a geo cluster, each cluster must have a unique name (use --name to set)")
+ boot_context = bootstrap.Context.set_context(options)
+ boot_context.ui_context = context
+ boot_context.stage = stage
+ boot_context.args = args
+ boot_context.cluster_is_running = ServiceManager(sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).service_is_active("pacemaker.service")
+ boot_context.type = "init"
+ boot_context.initialize_qdevice()
+ boot_context.validate_option()
+
+ bootstrap.bootstrap_init(boot_context)
+ bootstrap.bootstrap_add(boot_context)
+
+ return True
+
+ @command.skill_level('administrator')
+ def do_join(self, context, *args):
+ '''
+ Join this node to an existing cluster
+ '''
+ parser = ArgumentParser(description="""
+Join the current node to an existing cluster. The
+current node cannot be a member of a cluster already.
+Pass any node in the existing cluster as the argument
+to the -c option.""",usage="join [options] [STAGE]", epilog="""
+
+Stage can be one of:
+ ssh Obtain SSH keys from existing cluster node (requires -c <host>)
+ csync2 Configure csync2 (requires -c <host>)
+ ssh_merge Merge root's SSH known_hosts across all nodes (csync2 must
+ already be configured).
+ cluster Start the cluster on this node
+
+If stage is not specified, each stage will be invoked in sequence.
+
+Examples:
+ # Join with a cluster node
+ crm cluster join -c <node> -y
+
+ # Join with a cluster node, with the same network interface used by that node
+ crm cluster join -c <node> -i eth1 -i eth2 -y
+""", add_help=False, formatter_class=RawDescriptionHelpFormatter)
+ parser.add_argument("-h", "--help", action="store_true", dest="help", help="Show this help message")
+ parser.add_argument("-q", "--quiet", help="Be quiet (don't describe what's happening, just do it)", action="store_true", dest="quiet")
+ parser.add_argument("-y", "--yes", help='Answer "yes" to all prompts (use with caution)', action="store_true", dest="yes_to_all")
+ parser.add_argument("-w", "--watchdog", dest="watchdog", metavar="WATCHDOG", help="Use the given watchdog device")
+ parser.add_argument('--use-ssh-agent', action='store_true', dest='use_ssh_agent',
+ help="Use an existing key from ssh-agent instead of creating new key pairs")
+
+ network_group = parser.add_argument_group("Network configuration", "Options for configuring the network and messaging layer.")
+ network_group.add_argument(
+ "-c", "--cluster-node", metavar="[USER@]HOST", dest="cluster_node",
+ help="User and host to login to an existing cluster node. The host can be specified with either a hostname or an IP.",
+ )
+ network_group.add_argument("-i", "--interface", dest="nic_list", metavar="IF", action=CustomAppendAction, choices=utils.interface_choice(), default=[],
+ help="Bind to IP address on interface IF. Use -i second time for second interface")
+ options, args = parse_options(parser, args)
+ if options is None or args is None:
+ return
+
+ stage = ""
+ if len(args) == 1:
+ stage = args[0]
+ if stage not in ("ssh", "csync2", "ssh_merge", "cluster", ""):
+ parser.error("Invalid stage (%s)" % (stage))
+
+ join_context = bootstrap.Context.set_context(options)
+ join_context.ui_context = context
+ join_context.stage = stage
+ join_context.type = "join"
+ join_context.validate_option()
+
+ bootstrap.bootstrap_join(join_context)
+
+ return True
+
+ @command.alias("delete")
+ @command.completers_repeating(_remove_completer)
+ @command.skill_level('administrator')
+ def do_remove(self, context, *args):
+ '''
+ Remove the given node(s) from the cluster.
+ '''
+ parser = ArgumentParser(description="""
+Remove one or more nodes from the cluster.
+
+This command can remove the last node in the cluster,
+thus effectively removing the whole cluster. To remove
+the last node, pass the --force argument to crm or set
+the config.core.force option.""",
+ usage="remove [options] [<node> ...]", add_help=False, formatter_class=RawDescriptionHelpFormatter)
+ parser.add_argument("-h", "--help", action="store_true", dest="help", help="Show this help message")
+ parser.add_argument("-q", "--quiet", help="Be quiet (don't describe what's happening, just do it)", action="store_true", dest="quiet")
+ parser.add_argument("-y", "--yes", help='Answer "yes" to all prompts (use with caution)', action="store_true", dest="yes_to_all")
+ parser.add_argument("-c", "--cluster-node", dest="cluster_node", help="IP address or hostname of cluster node which will be deleted", metavar="HOST")
+ parser.add_argument("-F", "--force", dest="force", help="Remove current node", action="store_true")
+ parser.add_argument("--qdevice", dest="qdevice_rm_flag", help="Remove QDevice configuration and service from cluster", action="store_true")
+ options, args = parse_options(parser, args)
+ if options is None or args is None:
+ return
+
+ if options.cluster_node is not None and options.cluster_node not in args:
+ args = list(args) + [options.cluster_node]
+
+ rm_context = bootstrap.Context.set_context(options)
+ rm_context.ui_context = context
+
+ if len(args) == 0:
+ bootstrap.bootstrap_remove(rm_context)
+ else:
+ for node in args:
+ rm_context.cluster_node = node
+ bootstrap.bootstrap_remove(rm_context)
+ return True
+
+ @command.skill_level('administrator')
+ def do_rename(self, context, new_name):
+ '''
+ Rename the cluster.
+ '''
+ if not ServiceManager(sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).service_is_active("corosync.service"):
+ context.fatal_error("Can't rename cluster when cluster service is stopped")
+ old_name = cib_factory.get_property('cluster-name')
+ if old_name and new_name == old_name:
+ context.fatal_error("Expected a different name")
+
+ # Update config file with the new name on all nodes
+ nodes = utils.list_cluster_nodes()
+ corosync.set_value('totem.cluster_name', new_name)
+ if len(nodes) > 1:
+ nodes.remove(utils.this_node())
+ context.info("Copy cluster config file to \"{}\"".format(' '.join(nodes)))
+ corosync.push_configuration(nodes)
+
+ # Change the cluster-name property in the CIB
+ cib_factory.create_object("property", "cluster-name={}".format(new_name))
+ if not cib_factory.commit():
+ context.fatal_error("Change property cluster-name failed!")
+
+        # a gentle hint that the user still needs to restart the service
+        context.info("To apply the change, restart the cluster service at a convenient time")
+
+ def _parse_clustermap(self, clusters):
+ '''
+ Helper function to parse the cluster map into a dictionary:
+
+ name=ip; name2=ip2 -> { name: ip, name2: ip2 }
+ '''
+ if clusters is None:
+ return None
+ try:
+ return dict([re.split('[=:]+', o) for o in re.split('[ ,;]+', clusters)])
+        except (TypeError, ValueError):
+            return None
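+
+    # A sketch of the grammar _parse_clustermap accepts (cluster names and IPs
+    # are illustrative): '=' or ':' separates a name from its address; spaces,
+    # commas, or semicolons separate entries.
+    #
+    #     "paris=192.168.10.10; amsterdam=192.168.10.11"
+    #     -> {'paris': '192.168.10.10', 'amsterdam': '192.168.10.11'}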
+
+ @command.name("geo_init")
+ @command.alias("geo-init")
+ @command.skill_level('administrator')
+ def do_geo_init(self, context, *args):
+ '''
+ Make this cluster a geo cluster.
+ Needs some information to set up.
+
+ * cluster map: "cluster-name=ip cluster-name=ip"
+ * arbitrator IP / hostname (optional)
+ * list of tickets (can be empty)
+ '''
+ parser = ArgumentParser(description="""
+Create a new geo cluster with the current cluster as the
+first member. Pass the complete geo cluster topology as
+arguments to this command, and then use geo-join and
+geo-init-arbitrator to add the remaining members to
+the geo cluster.""",
+ usage="geo-init [options]", epilog="""
+
+Cluster Description
+
+ This is a map of cluster names to IP addresses.
+ Each IP address will be configured as a virtual IP
+ representing that cluster in the geo cluster
+ configuration.
+
+ Example with two clusters named paris and amsterdam:
+
+ --clusters "paris=192.168.10.10 amsterdam=192.168.10.11"
+
+ Name clusters using the --name parameter to
+ crm bootstrap init.
+""", add_help=False, formatter_class=RawDescriptionHelpFormatter)
+ parser.add_argument("-h", "--help", action="store_true", dest="help", help="Show this help message")
+ parser.add_argument("-q", "--quiet", help="Be quiet (don't describe what's happening, just do it)", action="store_true", dest="quiet")
+ parser.add_argument("-y", "--yes", help='Answer "yes" to all prompts (use with caution)', action="store_true", dest="yes_to_all")
+ parser.add_argument(
+ "-a", "--arbitrator", dest="arbitrator", metavar="[USER@]HOST",
+ help="Geo cluster arbitrator",
+ )
+ parser.add_argument("-s", "--clusters", help="Geo cluster description (see details below)", dest="clusters", metavar="DESC")
+ parser.add_argument("-t", "--tickets", help="Tickets to create (space-separated)", dest="tickets", metavar="LIST")
+ options, args = parse_options(parser, args)
+ if options is None or args is None:
+ return
+
+        if options.clusters is None:
+            parser.error("The --clusters argument is required.")
+
+ clustermap = self._parse_clustermap(options.clusters)
+ if clustermap is None:
+ parser.error("Invalid cluster description format")
+ ticketlist = []
+        if options.tickets is not None:
+            ticketlist = re.split('[ ,;]+', options.tickets)
+
+ geo_context = bootstrap.Context.set_context(options)
+ geo_context.clusters = clustermap
+ geo_context.tickets = ticketlist
+ geo_context.ui_context = context
+
+ bootstrap.bootstrap_init_geo(geo_context)
+ return True
+
+ @command.name("geo_join")
+ @command.alias("geo-join")
+ @command.skill_level('administrator')
+ def do_geo_join(self, context, *args):
+ '''
+ Join this cluster to a geo configuration.
+ '''
+ parser = ArgumentParser(description="""
+This command should be run from one of the nodes in a cluster
+which is currently not a member of a geo cluster. The geo
+cluster configuration will be fetched from the provided node,
+and the cluster will be added to the geo cluster.
+
+Note that each cluster in a geo cluster needs to have a unique
+name set. The cluster name can be set using the --name argument
+to init, or by configuring corosync with the cluster name in
+an existing cluster.""",
+ usage="geo-join [options]", add_help=False, formatter_class=RawDescriptionHelpFormatter)
+ parser.add_argument("-h", "--help", action="store_true", dest="help", help="Show this help message")
+ parser.add_argument("-q", "--quiet", help="Be quiet (don't describe what's happening, just do it)", action="store_true", dest="quiet")
+ parser.add_argument("-y", "--yes", help='Answer "yes" to all prompts (use with caution)', action="store_true", dest="yes_to_all")
+ parser.add_argument("-c", "--cluster-node", metavar="[USER@]HOST", help="An already-configured geo cluster or arbitrator", dest="cluster_node")
+ parser.add_argument("-s", "--clusters", help="Geo cluster description (see geo-init for details)", dest="clusters", metavar="DESC")
+ options, args = parse_options(parser, args)
+ if options is None or args is None:
+ return
+ errs = []
+ if options.cluster_node is None:
+ errs.append("The --cluster-node argument is required.")
+ if options.clusters is None:
+ errs.append("The --clusters argument is required.")
+ if len(errs) > 0:
+ parser.error(" ".join(errs))
+ clustermap = self._parse_clustermap(options.clusters)
+ if clustermap is None:
+ parser.error("Invalid cluster description format")
+
+ geo_context = bootstrap.Context.set_context(options)
+ geo_context.clusters = clustermap
+ geo_context.ui_context = context
+
+ bootstrap.bootstrap_join_geo(geo_context)
+ return True
+
+ @command.name("geo_init_arbitrator")
+ @command.alias("geo-init-arbitrator")
+ @command.skill_level('administrator')
+ def do_geo_init_arbitrator(self, context, *args):
+ '''
+ Make this node a geo arbitrator.
+ '''
+ parser = ArgumentParser(description="""
+Configure the current node as a geo arbitrator. The command
+requires an existing geo cluster or geo arbitrator from which
+to get the geo cluster configuration.""",
+ usage="geo-init-arbitrator [options]", add_help=False, formatter_class=RawDescriptionHelpFormatter)
+ parser.add_argument("-h", "--help", action="store_true", dest="help", help="Show this help message")
+ parser.add_argument("-q", "--quiet", help="Be quiet (don't describe what's happening, just do it)", action="store_true", dest="quiet")
+ parser.add_argument("-y", "--yes", help='Answer "yes" to all prompts (use with caution)', action="store_true", dest="yes_to_all")
+ parser.add_argument("-c", "--cluster-node", metavar="[USER@]HOST", help="An already-configured geo cluster", dest="cluster_node")
+ parser.add_argument('--use-ssh-agent', action='store_true', dest='use_ssh_agent',
+ help="Use an existing key from ssh-agent instead of creating new key pairs")
+ options, args = parse_options(parser, args)
+ if options is None or args is None:
+ return
+
+ geo_context = bootstrap.Context.set_context(options)
+ geo_context.ui_context = context
+
+ bootstrap.bootstrap_arbitrator(geo_context)
+ return True
+
+ @command.completers_repeating(compl.call(scripts.param_completion_list, 'health'))
+ def do_health(self, context, *args):
+ '''
+ Extensive health check.
+ '''
+ params = self._args_implicit(context, args, 'nodes')
+ script = scripts.load_script('health')
+ if script is None:
+ raise ValueError("health script failed to load")
+ return scripts.run(script, script_args(params), script_printer())
+
+ def _node_in_cluster(self, node):
+ return node in utils.list_cluster_nodes()
+
+ def do_status(self, context):
+ '''
+ Quick cluster health status. Corosync status, DRBD status...
+ '''
+ print("Name: {}\n".format(get_cluster_name()))
+ print("Services:")
+ for svc in ["corosync", "pacemaker"]:
+ info = utils.service_info(svc)
+ if info:
+ print("%-16s %s" % (svc, info))
+ else:
+ print("%-16s unknown" % (svc))
+
+ rc, outp = ShellUtils().get_stdout(['corosync-cfgtool', '-s'], shell=False)
+ if rc == 0:
+ print("")
+ print(outp)
+ else:
+ print("Failed to get corosync status")
+
+ @command.completers_repeating(compl.choice(['10', '60', '600']))
+ def do_wait_for_startup(self, context, timeout='10'):
+ "usage: wait_for_startup [<timeout>]"
+ import time
+ t0 = time.time()
+ timeout = float(timeout)
+ cmd = 'crm_mon -bD1 >/dev/null 2>&1'
+ ret = utils.ext_cmd(cmd)
+ while ret in (107, 64) and time.time() < t0 + timeout:
+ time.sleep(1)
+ ret = utils.ext_cmd(cmd)
+ if ret != 0:
+ context.fatal_error("Timed out waiting for cluster (rc = %s)" % (ret))
+
+ @command.skill_level('expert')
+ def do_run(self, context, cmd, *nodes):
+ '''
+ Execute the given command on all nodes/specific node(s), report outcome
+ '''
+ if nodes:
+ hosts = list(nodes)
+ else:
+ hosts = utils.list_cluster_nodes()
+ if hosts is None:
+ context.fatal_error("failed to get node list from cluster")
+
+ for host, result in prun.prun({x: cmd for x in hosts}).items():
+ if isinstance(result, prun.PRunError):
+ logger.error("[%s]: %s", host, result)
+ else:
+ if result.returncode != 0:
+ logger.error(
+ "[%s]: Exited with error code %s. Error output: %s",
+ host, result.returncode, utils.to_ascii(result.stderr),
+ )
+ else:
+ if not result.stdout:
+ logger.info("[%s]", host)
+ else:
+ logger.info("[%s]\n%s", host, utils.to_ascii(result.stdout))
+
+ def do_copy(self, context, local_file, *nodes):
+ '''
+ usage: copy <filename> [nodes ...]
+ Copy file to other cluster nodes.
+ If given no nodes as arguments, copy to all other cluster nodes.
+ '''
+ return utils.cluster_copy_file(local_file, nodes)
+
+ def do_diff(self, context, filename, *nodes):
+ "usage: diff <filename> [--checksum] [nodes...]. Diff file across cluster."
+ nodes = list(nodes)
+ this_node = utils.this_node()
+ checksum = False
+ if len(nodes) and nodes[0] == '--checksum':
+ nodes = nodes[1:]
+ checksum = True
+ if not nodes:
+ nodes = utils.list_cluster_nodes()
+ if checksum:
+ utils.remote_checksum(filename, nodes, this_node)
+ elif len(nodes) == 1:
+ utils.remote_diff_this(filename, nodes, this_node)
+ elif this_node in nodes:
+ nodes.remove(this_node)
+ utils.remote_diff_this(filename, nodes, this_node)
+ elif len(nodes):
+ utils.remote_diff(filename, nodes)
+
+ def do_crash_test(self, context, *args):
+ """
+ """
+ from .crash_test import main
+ sys.argv[1:] = args
+ main.ctx.process_name = context.command_name
+ main.run(main.ctx)
+ return True
diff --git a/crmsh/ui_configure.py b/crmsh/ui_configure.py
new file mode 100644
index 0000000..fbfc40a
--- /dev/null
+++ b/crmsh/ui_configure.py
@@ -0,0 +1,1241 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import re
+import time
+from . import command
+from . import completers as compl
+from . import config
+from . import utils
+from . import constants
+from . import userdir
+from . import xmlutil
+from . import ra
+from .cibconfig import mkset_obj, cib_factory
+from . import clidisplay
+from . import term
+from . import options
+from . import rsctest
+from . import schema
+from . import ui_cib
+from . import ui_cibstatus
+from . import ui_ra
+from . import ui_template
+from . import ui_history
+from . import ui_utils
+from . import ui_assist
+from .crm_gv import gv_types
+from .ui_node import get_resources_on_nodes, remove_redundant_attrs
+
+
+from . import log
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+
+
+def _type_completions():
+ "completer for type: use in show"
+ typelist = cib_factory.type_list()
+ return ['type:%s' % (t) for t in typelist]
+
+
+def _tag_completions():
+ "completer for tag: use in show"
+ return ['tag:%s' % (t) for t in cib_factory.tag_list()]
+
+
+# Tab completion helpers
+_id_list = compl.call(cib_factory.id_list)
+_id_xml_list = compl.join(_id_list, compl.choice(['xml']))
+_id_show_list = compl.join(_id_list,
+ compl.choice(['xml', 'changed']),
+ compl.call(_type_completions),
+ compl.call(_tag_completions))
+_prim_id_list = compl.call(cib_factory.prim_id_list)
+_f_prim_free_id_list = compl.call(cib_factory.f_prim_free_id_list)
+_f_group_id_list = compl.call(cib_factory.f_group_id_list)
+_f_children_id_list = compl.call(cib_factory.f_children_id_list)
+_rsc_id_list = compl.call(cib_factory.rsc_id_list)
+_top_rsc_id_list = compl.call(cib_factory.top_rsc_id_list)
+_node_id_list = compl.call(cib_factory.node_id_list)
+_rsc_template_list = compl.call(cib_factory.rsc_template_list)
+_container_type = compl.choice(constants.container_type)
+
+
+def _group_completer(args):
+ '''
+ completer for group resource
+ '''
+ completing = args[-1]
+ id_list = cib_factory.f_prim_free_id_list()
+ if completing in id_list:
+ return [completing]
+ # complete resources id first
+ if len(args) == 3:
+ return [s for s in id_list if s not in args]
+ # complete meta or params attributes
+ key_words = ["meta", "params"]
+ keyw = last_keyword(args, key_words)
+ if keyw in key_words:
+ return _advanced_completer(args)
+
+ # otherwise, complete resources ids and some key words
+ return [s for s in id_list if s not in args] + _advanced_completer(args)
+
+
+def _advanced_completer(args):
+ '''
+ meta completers for group/ms/clone resource type
+ '''
+ key_words = ["meta", "params"]
+ completing = args[-1]
+ resource_type = args[0]
+ return_list = []
+ if completing.endswith('='):
+ # TODO add some help messages
+ return []
+ keyw = last_keyword(args, key_words)
+ if keyw and keyw == "meta":
+ if resource_type == "group":
+ return_list = utils.filter_keys(constants.group_meta_attributes, args)
+ if resource_type == "clone":
+ return_list = utils.filter_keys(constants.clone_meta_attributes, args)
+ if resource_type in ["ms", "master"]:
+ return_list = utils.filter_keys(constants.ms_meta_attributes, args)
+ return return_list + key_words
+
+
+def _list_resource(args):
+ if len(args) > 3:
+ if args[2] == "remove":
+ res = cib_factory.f_prim_list_in_group(args[1])
+ if len(res) <= 1:
+ return []
+ else:
+ return res
+ if args[2] == "add":
+ return cib_factory.f_prim_free_id_list()
+
+
+def _list_resource_2(args):
+ if len(args) > 5:
+ return cib_factory.f_prim_list_in_group(args[1])
+
+
+def _pick_position(args):
+ if args[2] == "remove":
+ return []
+ else:
+ return ["after", "before"]
+
+
+def top_rsc_tmpl_id_list(args):
+ return cib_factory.top_rsc_id_list() + cib_factory.rsc_template_list()
+
+
+def ra_classes_or_tmpl(args):
+ return ui_ra.complete_class_provider_type(args) + \
+ ['@'+x for x in cib_factory.rsc_template_list()]
+
+
+def op_attr_list(args):
+ schema_attr = [schema.get('attr', 'op', 'o') + '=']
+ extra_attrs = [s + '=' for s in constants.op_extra_attrs]
+ return schema_attr + extra_attrs
+
+
+def node_id_colon_list(args):
+ return [s + ':' for s in _node_id_list(args)]
+
+
+def stonith_resource_list(args):
+ return [x.obj_id for x in
+ cib_factory.get_elems_on_type("type:primitive")
+ if x.node.get("class") == "stonith"]
+
+
+def _load_2nd_completer(args):
+ if args[1] == 'xml':
+ return ['replace', 'update', 'push']
+ return []
+
+
+# completion for primitives including help for parameters
+# (help also available for properties)
+
+def get_prim_token(words, n):
+ for key in ("primitive", "rsc_template"):
+ try:
+ if key in words:
+ return words[words.index(key) + n - 1]
+ except IndexError:
+ pass
+ return ''
+
+
+def ra_agent_for_template(tmpl):
+ '''@template -> ra.agent'''
+ obj = cib_factory.find_resource(tmpl[1:])
+ if obj is None:
+ return None
+ return ra.get_ra(obj.node)
+
+
+def ra_agent_for_cpt(cpt):
+ '''class:provider:type -> ra.agent'''
+ agent = None
+ ra_class, provider, rsc_type = ra.disambiguate_ra_type(cpt)
+ if ra.ra_type_validate(cpt, ra_class, provider, rsc_type):
+ agent = ra.RAInfo(ra_class, rsc_type, provider)
+ return agent
+
+
+class CompletionHelp(object):
+ '''
+    Print some help for the last word on the line.
+ '''
+ timeout = 60 # don't print again and again
+ laststamp = 0
+ lasttopic = ''
+
+ @classmethod
+ def help(cls, topic, helptxt, args):
+ if cls.lasttopic == topic and \
+ time.time() - cls.laststamp < cls.timeout:
+ return
+ if helptxt:
+ import readline
+ cmdline = readline.get_line_buffer()
+ print("\n%s" % helptxt, end='')
+ if cmdline.split()[0] != args[0]:
+ prompt = ' > '
+ else:
+ if clidisplay.colors_enabled():
+ prompt = term.render(clidisplay.prompt_noreadline(constants.prompt))
+ else:
+ prompt = constants.prompt
+ print("\n%s%s" % (prompt, cmdline), end=' ')
+ cls.laststamp = time.time()
+ cls.lasttopic = topic
+
+
+def _prim_params_completer(agent, args):
+ completing = args[-1]
+ if completing == 'params':
+ return ['params']
+ if completing.endswith('='):
+ if len(completing) > 1 and options.interactive:
+ topic = completing[:-1]
+ CompletionHelp.help(topic, agent.meta_parameter(topic), args)
+ return []
+ elif '=' in completing:
+ return []
+ return utils.filter_keys(agent.params(completion=True), args)
+
+
+def _prim_meta_completer(agent, args):
+ completing = args[-1]
+ if completing == 'meta':
+ return ['meta']
+ if '=' in completing:
+ return []
+ return utils.filter_keys(constants.rsc_meta_attributes, args)
+
+
+def _prim_op_completer(agent, args):
+
+ def concat_kv(k, v):
+ return "{}={}".format(k, v)
+
+ if args[-1] == 'op':
+ return ['op']
+ actions = agent.actions()
+ if not actions:
+ return []
+ # list all actions, select one to complete
+ if args[-2] == 'op':
+ return actions.keys()
+ # list all attributes of the action, select one to complete
+ if args[-3] == 'op':
+ res = []
+ op_name = args[-2]
+ if op_name == 'monitor':
+ for one_monitor in actions[op_name]:
+ res += [concat_kv(k, v) for k, v in one_monitor.items()]
+ else:
+ res = [concat_kv(k, v) for k, v in actions[op_name].items()]
+ return res
+
+ args.pop()
+ if '=' in args[-1]:
+ res = []
+ # find latest action
+ op_name = None
+ for i, item in enumerate(reversed(args)):
+ if item in actions:
+ op_name = item
+ break
+ if not op_name:
+ return []
+            # list the remaining attributes of the action, select one to complete
+ actions_list_in_args = [arg.split('=')[0] for arg in args[len(args)-i:]]
+ if op_name == 'monitor':
+ for one_monitor in actions[op_name]:
+ res += [concat_kv(k, v) for k, v in one_monitor.items() if k not in actions_list_in_args]
+ else:
+ res = [concat_kv(k, v) for k, v in actions[op_name].items() if k not in actions_list_in_args]
+ return res
+
+ return []
+
+
+def last_keyword(words, keyw):
+    '''returns the last occurrence in words of an element of keyw'''
+ for w in reversed(words):
+ if w in keyw:
+ return w
+ return None
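+
+# For example (tokens illustrative), scanning from the end of the line:
+#
+#     last_keyword(["primitive", "p1", "params", "ip=1", "meta"],
+#                  ["params", "meta", "op"])       # -> 'meta'
+#     last_keyword(["primitive", "p1"], ["params"])  # -> None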
+
+
+def _property_completer(args):
+ '''context-sensitive completer'''
+ agent = ra.get_properties_meta()
+ return _prim_params_completer(agent, args)
+
+
+def primitive_complete_complex(args):
+ '''
+ This completer depends on the content of the line, i.e. on
+ previous tokens, in particular on the type of the RA.
+ '''
+ cmd = get_prim_token(args, 1)
+ type_word = get_prim_token(args, 3)
+ with_template = cmd == 'primitive' and type_word.startswith('@')
+
+ if with_template:
+ agent = ra_agent_for_template(type_word)
+ else:
+ agent = ra_agent_for_cpt(type_word)
+ if agent is None:
+ return []
+
+ completers_set = {
+ "params": _prim_params_completer,
+ "meta": _prim_meta_completer,
+ "op": _prim_op_completer,
+ }
+
+ keywords = list(completers_set.keys())
+ if len(args) == 4: # <cmd> <id> <type> <?>
+ return keywords
+
+ last_keyw = last_keyword(args, keywords)
+ if last_keyw is None:
+ return []
+
+ complete_results = completers_set[last_keyw](agent, args)
+ if len(args) > 4 and '=' in args[-1]:
+ return complete_results + keywords
+
+ return complete_results
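+
+# A sketch of the dispatch above (resource id and agent are illustrative):
+# completing "primitive vip ocf:heartbeat:IPaddr2 <TAB>" offers the keywords
+# params/meta/op; once a keyword is on the line, the matching
+# _prim_*_completer takes over, e.g. "... params ip=<TAB>" prints the agent's
+# help text for the 'ip' parameter via CompletionHelp.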
+
+
+def container_helptxt(params, helptxt, topic):
+ for item in reversed(params):
+ if item in ["storage", "network", "docker", "rkt"]:
+ return helptxt[item][topic] + "\n"
+ if item == "port-mapping":
+ return helptxt["network"][item][topic] + "\n"
+
+
+def _container_remove_exist_keywords(args, _keywords):
+ for item in ["network", "primitive"]:
+ if item in args:
+ _keywords.remove(item)
+
+
+def _container_network_completer(args, _help, _keywords):
+ key_words = ["network", "port-mapping"]
+ completing = args[-1]
+ token = args[-2]
+ if completing.endswith("="):
+ return []
+ if completing in key_words:
+ return [completing]
+
+ tmp = list(_help["network"].keys())
+    # port-mapping is an element, not a network option
+ tmp.remove("port-mapping")
+ network_keys = utils.filter_keys(tmp, args)
+    # a bundle contains just one <network>/<primitive> element
+ _container_remove_exist_keywords(args, _keywords)
+
+ last_keyw = last_keyword(args, key_words)
+ if last_keyw == "network":
+ if token == "network":
+ return network_keys
+ else:
+ # complete port-mapping or other parts
+ return network_keys + ["port-mapping"] + _keywords
+
+ if last_keyw == "port-mapping":
+ mapping_required = ["id"]
+ mapping_params = args[utils.rindex(args, "port-mapping"):]
+ mapping_keys = utils.filter_keys(_help["network"]["port-mapping"].keys(), mapping_params)
+ if token == "port-mapping":
+ return mapping_keys
+ # required options must be completed
+ for s in mapping_required:
+ if utils.any_startswith(mapping_params, s+'=') is None:
+ return mapping_keys
+ # complete port-mapping or other parts
+ return mapping_keys + ["port-mapping"] + _keywords
+
+
+def _container_storage_completer(args, _help, _keywords):
+ completing = args[-1]
+ if completing.endswith("="):
+ return []
+ if completing == "storage":
+ return [completing]
+ if args[-2] == "storage":
+ return ["storage-mapping"]
+
+ storage_required = ["id", "target-dir"]
+ # get last storage part
+ mapping_params = args[utils.rindex(args, "storage-mapping"):]
+ storage_keys = utils.filter_keys(_help["storage"].keys(), mapping_params)
+
+ # required options must be completed
+ for s in storage_required:
+ if utils.any_startswith(mapping_params, s+"=") is None:
+ return storage_keys
+    # a bundle contains just one <network>/<primitive> element
+ _container_remove_exist_keywords(args, _keywords)
+ # complete storage or other parts
+ return storage_keys + _keywords
+
+
+def _container_primitive_completer(args, _help, _keywords):
+ completing = args[-1]
+ if completing == "primitive":
+ return [completing]
+
+ _id_list = cib_factory.f_prim_free_id_list()
+ if _id_list is None:
+ return []
+    # a bundle contains just one <network>/<primitive> element
+ _container_remove_exist_keywords(args, _keywords)
+ if args[-3] == "primitive" and args[-2] in _id_list:
+ return _keywords
+ return _id_list
+
+
+def _container_meta_completer(args, helptxt, _keywords):
+ completing = args[-1]
+ if completing.endswith("="):
+ return []
+ if completing == "meta":
+ return [completing]
+
+    # a bundle contains just one <network>/<primitive> element
+ _container_remove_exist_keywords(args, _keywords)
+
+ return utils.filter_keys(constants.bundle_meta_attributes, args) + _keywords
+
+
+def container_complete_complex(args):
+ '''
+ Complete five parts:
+ container options, network, storage, primitive and meta
+ '''
+ container_options_required = ["image"]
+ completing = args[-1]
+ container_type = args[2]
+
+ completers_set = {
+ "network": _container_network_completer,
+ "storage": _container_storage_completer,
+ "primitive": _container_primitive_completer,
+ "meta": _container_meta_completer
+ }
+ keywords = list(completers_set.keys())
+ last_keyw = last_keyword(args, keywords)
+
+ # to show help messages
+ if completing.endswith('='):
+ if len(completing) > 1 and options.interactive:
+ topic = completing[:-1]
+ CompletionHelp.help(topic, container_helptxt(args, constants.container_helptxt, topic), args)
+ return []
+
+ container_options = utils.filter_keys(constants.container_helptxt[container_type].keys(), args)
+
+ # required options must be completed
+ for s in container_options_required:
+ if utils.any_startswith(args, s+'=') is None:
+ return container_options
+
+ if last_keyw is None:
+ return container_options + keywords
+
+ # to complete network, storage, primitive and meta
+ return completers_set[last_keyw](args, constants.container_helptxt, keywords)
+
+
+class CibConfig(command.UI):
+ '''
+ The configuration class
+ '''
+ name = "configure"
+
+ def __init__(self):
+ command.UI.__init__(self)
+ # for interactive use, we want to populate the CIB
+ # immediately so that tab completion works
+
+ def requires(self):
+ if not cib_factory.initialize():
+ return False
+ # see the configure ptest/simulate command
+ has_ptest = utils.is_program('ptest')
+ has_simulate = utils.is_program('crm_simulate')
+ if not has_ptest:
+ constants.simulate_programs["ptest"] = "crm_simulate"
+ if not has_simulate:
+ constants.simulate_programs["simulate"] = "ptest"
+ if not (has_ptest or has_simulate):
+ logger.warning("neither ptest nor crm_simulate exist, check your installation")
+ constants.simulate_programs["ptest"] = ""
+ constants.simulate_programs["simulate"] = ""
+ return True
+
+ @command.name('_test')
+ @command.skill_level('administrator')
+ def do_check_structure(self, context):
+ cib_factory.ensure_cib_updated()
+ return cib_factory.check_structure()
+
+ @command.name('_regtest')
+ @command.skill_level('administrator')
+ def do_regression_testing(self, context, param):
+ return cib_factory.regression_testing(param)
+
+ @command.name('_objects')
+ @command.skill_level('administrator')
+ def do_showobjects(self, context):
+ cib_factory.showobjects()
+
+ @command.name('_keywords')
+ @command.skill_level('administrator')
+ def do_keywords(self, context):
+ for k, v in sorted(iter(constants.keywords.items()), key=lambda v: v[0].lower()):
+ print("%-16s %s" % (k, v))
+
+ @command.level(ui_ra.RA)
+ def do_ra(self):
+ pass
+
+ @command.level(ui_cib.CibShadow)
+ def do_cib(self):
+ pass
+
+ @command.level(ui_cibstatus.CibStatusUI)
+ def do_cibstatus(self):
+ pass
+
+ @command.level(ui_template.Template)
+ def do_template(self):
+ pass
+
+ @command.level(ui_history.History)
+ def do_history(self):
+ pass
+
+ @command.level(ui_assist.Assist)
+ def do_assist(self):
+ pass
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(_id_show_list)
+ def do_show(self, context, *args):
+ "usage: show [xml] [<id>...]"
+ from .utils import obscure
+ osargs = [arg[8:] for arg in args if arg.startswith('obscure:')]
+ if not osargs and config.core.obscure_pattern:
+ # obscure_pattern could be
+ # 1. "pattern1 pattern2 pattern3"
+ # 2. "pattern1|pattern2|pattern3"
+            # the regex here also filters out surrounding spaces
+            osargs = re.split(r'\s*\|\s*|\s+', config.core.obscure_pattern.strip('|'))
+ args = [arg for arg in args if not arg.startswith('obscure:')]
+ cib_factory.ensure_cib_updated()
+ with obscure(osargs):
+ set_obj = mkset_obj(*args)
+ return set_obj.show()
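+
+    # A sketch of the obscure_pattern splitting above (patterns illustrative):
+    #
+    #     re.split(r'\s*\|\s*|\s+', "passwd | secret shadow".strip('|'))
+    #     # -> ['passwd', 'secret', 'shadow']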
+
+ @command.name("get_property")
+ @command.alias("get-property")
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.call(ra.get_properties_list))
+ def do_get_property(self, context, *args):
+ "usage: get-property [-t|--true [<name>...]"
+ properties = [a for a in args if a not in ('-t', '--true')]
+ truth = any(a for a in args if a in ('-t', '--true'))
+
+ if not properties:
+ utils.multicolumn(ra.get_properties_list())
+ return
+
+ def print_value(v):
+ if truth:
+ print(utils.canonical_boolean(v))
+ else:
+ print(v)
+ cib_factory.ensure_cib_updated()
+ for p in properties:
+ v = cib_factory.get_property_w_default(p)
+ if v is not None:
+ print_value(v)
+ elif truth:
+ print("false")
+ else:
+ context.fatal_error("%s: Property not set" % (p))
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.null, _id_xml_list, _id_list)
+ def do_filter(self, context, filterprog, *args):
+ "usage: filter <prog> [xml] [<id>...]"
+ cib_factory.ensure_cib_updated()
+ set_obj = mkset_obj(*args)
+ return set_obj.filter(filterprog)
+
+ @command.skill_level('administrator')
+ @command.completers(_id_list)
+ def do_set(self, context, path, value):
+ """
+ usage: set <path> <value>
+
+ path:: id.[op_type.][interval.]name
+ """
+ path_errmsg = "Invalid path: \"{}\"; Valid path: \"id.[op_type.][interval.]name\"".format(path)
+ path_list = path.split('.')
+ if len(path_list) < 2 or len(path_list) > 4:
+ context.fatal_error(path_errmsg)
+
+ cib_factory.ensure_cib_updated()
+ obj_id, *other_path_list = path_list
+ rsc = cib_factory.find_object(obj_id)
+ if not rsc:
+ context.fatal_error("Object {} not found".format(obj_id))
+
+ # Use case for: set id.name value
+ if len(other_path_list) == 1:
+ obj_attr = other_path_list[0]
+ nvpairs = rsc.node.xpath(".//nvpair[@name='{}']".format(obj_attr))
+ if not nvpairs:
+ context.fatal_error("Attribute not found: {}".format(path))
+ if len(nvpairs) != 1:
+ context.fatal_error("Expected 1 attribute named {}, found {}".format(obj_attr, len(nvpairs)))
+ nvpairs[0].set("value", value)
+
+ # Use case for: set id.op_type.name value
+ if len(other_path_list) == 2:
+ op_type, name = other_path_list
+ op_res = rsc.node.xpath(".//operations/op[@name='{}']".format(op_type))
+ if not op_res:
+ context.fatal_error("Operation \"{}\" not found for resource {}".format(op_type, obj_id))
+ if len(op_res) > 1:
+ context.fatal_error("Should specify interval of {}".format(op_type))
+ op_res[0].set(name, value)
+
+ # Use case for: set id.op_type.interval.name value
+ if len(other_path_list) == 3:
+ op_type, iv, name = other_path_list
+ op_res = rsc.node.xpath(".//operations/op[@id='{}-{}-{}']".format(obj_id, op_type, iv))
+ if not op_res:
+ context.fatal_error("Operation \"{}\" interval \"{}\" not found for resource {}".format(op_type, iv, obj_id))
+ op_res[0].set(name, value)
+
+ rsc.set_updated()
+ return True
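+
+    # The three path shapes accepted above (resource id, operation and
+    # attribute names are illustrative):
+    #
+    #     set vip.ip 192.168.1.2             # id.name
+    #     set vip.monitor.timeout 30s        # id.op_type.name
+    #     set vip.monitor.10s.timeout 30s    # id.op_type.interval.name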
+
+ @command.skill_level('administrator')
+ @command.completers(_f_group_id_list, compl.choice(['add', 'remove']),
+ _list_resource, _pick_position, _list_resource_2)
+ def do_modgroup(self, context, group_id, subcmd, prim_id, *args):
+ """usage: modgroup <id> add <id> [after <id>|before <id>]
+ modgroup <id> remove <id>"""
+ if subcmd not in ("add", "remove"):
+ logger.error("modgroup subcommand %s unknown" % subcmd)
+ return False
+ after_before = None
+ if args:
+ if subcmd != 'add':
+ context.fatal_error("Expected add (found %s)" % subcmd)
+ if args[0] not in ("after", "before"):
+ context.fatal_error("Expected after|before (found %s)" % args[0])
+ if len(args) != 2:
+ context.fatal_error("Expected 'after|before <id>' (%d arguments given)" %
+ len(args))
+ after_before = args[0]
+ ref_member_id = args[1]
+ cib_factory.ensure_cib_updated()
+ g = cib_factory.find_object(group_id)
+ if not g:
+ context.fatal_error("group %s does not exist" % group_id)
+ if not xmlutil.is_group(g.node):
+ context.fatal_error("element %s is not a group" % group_id)
+ children = xmlutil.get_rsc_children_ids(g.node)
+ if after_before and ref_member_id not in children:
+ context.fatal_error("%s is not member of %s" % (ref_member_id, group_id))
+ if subcmd == "remove" and prim_id not in children:
+ context.fatal_error("%s is not member of %s" % (prim_id, group_id))
+ # done checking arguments
+ # have a group and children
+ if not after_before:
+ after_before = "after"
+ ref_member_id = children[-1]
+ # just do the filter
+ # (i wonder if this is a feature abuse?)
+ if subcmd == "add":
+ if after_before == "after":
+ sed_s = r's/ %s( |$)/& %s /' % (ref_member_id, prim_id)
+ else:
+ sed_s = r's/ %s( |$)/ %s& /' % (ref_member_id, prim_id)
+ else:
+ sed_s = r's/ %s( |$)/ /' % prim_id
+        set_obj = mkset_obj(group_id)
+ return set_obj.filter("sed -r '%s'" % sed_s)
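+
+    # A sketch of the sed programs built above, applied to "group g1 a b c"
+    # (ids illustrative; extra whitespace is harmless to the parser):
+    #
+    #     add p1 after b:   s/ b( |$)/& p1 /  ->  group g1 a b p1 c
+    #     add p1 before b:  s/ b( |$)/ p1& /  ->  group g1 a p1 b c
+    #     remove b:         s/ b( |$)/ /      ->  group g1 a c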
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(_id_xml_list, _id_list)
+ def do_edit(self, context, *args):
+ "usage: edit [xml] [<id>...]"
+ cib_factory.ensure_cib_updated()
+ with logger_utils.buffer(): # keep error messages
+ set_obj = mkset_obj(*args)
+ return set_obj.edit()
+
+ def _verify(self, set_obj_semantic, set_obj_all):
+ rc1 = set_obj_all.verify()
+ if config.core.check_frequency != "never":
+ rc2 = set_obj_semantic.semantic_check(set_obj_all)
+ else:
+ rc2 = 0
+ return rc1 and rc2 <= 1
+
+ @command.skill_level('administrator')
+ def do_verify(self, context):
+ "usage: verify"
+ cib_factory.ensure_cib_updated()
+ set_obj_all = mkset_obj("xml")
+ return self._verify(set_obj_all, set_obj_all)
+
+ @command.name('validate-all')
+ @command.alias('validate_all')
+ @command.skill_level('administrator')
+ @command.completers_repeating(_id_list)
+ def do_validate_all(self, context, rsc):
+ "usage: validate-all <rsc>"
+ cib_factory.ensure_cib_updated()
+ from . import ra
+ from . import cibconfig
+ from . import cliformat
+ obj = cib_factory.find_object(rsc)
+ if not obj:
+ context.error("Not found: %s" % (rsc))
+ if obj.obj_type != "primitive":
+ context.error("Not a primitive: %s" % (rsc))
+ rnode = cibconfig.reduce_primitive(obj.node)
+ if rnode is None:
+ context.error("No resource template %s for %s" % (obj.node.get("template"), rsc))
+ params = []
+ for attrs in rnode.iterchildren("instance_attributes"):
+ params.extend(cliformat.nvpairs2list(attrs))
+ if not all(nvp.get('name') is not None and nvp.get('value') is not None for nvp in params):
+ context.error("Primitive too complex: %s" % (rsc))
+ params = dict([(nvp.get('name'), nvp.get('value')) for nvp in params])
+ agentname = xmlutil.mk_rsc_type(rnode)
+ if not ra.can_validate_agent(agentname):
+ context.error("%s: Cannot run validate-all for agent: %s" % (rsc, agentname))
+ rc, _ = ra.validate_agent(agentname, params, log=True)
+ return rc == 0
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(_id_show_list)
+ def do_save(self, context, *args):
+ "usage: save [xml] [<id>...] <filename>"
+ if not args:
+ context.fatal_error("Expected 1 argument (0 given)")
+ cib_factory.ensure_cib_updated()
+ filename = args[-1]
+ setargs = args[:-1]
+ set_obj = mkset_obj(*setargs)
+ return set_obj.save_to_file(filename)
+
+ @command.skill_level('administrator')
+ @command.completers(compl.choice(['xml', 'replace', 'update', 'push']), _load_2nd_completer)
+ def do_load(self, context, *args):
+ "usage: load [xml] {replace|update|push} {<url>|<path>}"
+ if len(args) < 2:
+ context.fatal_error("Expected 2 arguments (0 given)")
+ if args[0] == "xml":
+ if len(args) != 3:
+ context.fatal_error("Expected 3 arguments (%d given)" % len(args))
+ url = args[2]
+ method = args[1]
+ xml = True
+ else:
+ if len(args) != 2:
+ context.fatal_error("Expected 2 arguments (%d given)" % len(args))
+ url = args[1]
+ method = args[0]
+ xml = False
+ if method not in ("replace", "update", "push"):
+ context.fatal_error("Unknown method %s" % method)
+ cib_factory.ensure_cib_updated()
+ if method == "replace":
+ if options.interactive and cib_factory.has_cib_changed():
+ if not utils.ask("This operation will erase all changes. Do you want to proceed?"):
+ return False
+ cib_factory.erase()
+ if xml:
+ set_obj = mkset_obj("xml")
+ else:
+ set_obj = mkset_obj()
+ return set_obj.import_file(method, url)
+
+ @command.skill_level('administrator')
+ @command.completers(compl.choice(list(gv_types.keys()) + ['exportsettings']))
+ def do_graph(self, context, *args):
+ "usage: graph [<gtype> [<file> [<img_format>]]]"
+ if args and args[0] == "exportsettings":
+ return utils.save_graphviz_file(userdir.GRAPHVIZ_USER_FILE, constants.graph)
+ cib_factory.ensure_cib_updated()
+ set_obj = mkset_obj()
+ rc = set_obj.query_graph(*args)
+ if rc is None:
+ context.fatal_error("Failed to create graph")
+ return rc
+
+ def _stop_if_running(self, rscs):
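+        # Before a forced delete, stop any of the given resources that are
+        # still running: the target-role updates are batched (commit=False)
+        # and pushed with a single commit.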
+ rscstate = xmlutil.RscState()
+ to_stop = [rsc for rsc in rscs if rscstate.is_running(rsc)]
+ from .ui_resource import set_deep_meta_attr
+ if len(to_stop) > 0:
+ ok = all(set_deep_meta_attr(rsc, 'target-role', 'Stopped',
+ commit=False) for rsc in to_stop)
+ if not ok or not cib_factory.commit():
+ raise ValueError("Failed to stop one or more running resources: %s" %
+ (', '.join(to_stop)))
+ return len(to_stop)
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(_id_list)
+ @command.alias('rm')
+ def do_delete(self, context, *args):
+ "usage: delete [-f|--force] <id> [<id>...]"
+ argl = list(args)
+ arg_force = any((x in ('-f', '--force')) for x in argl)
+ argl = [x for x in argl if x not in ('-f', '--force')]
+ if arg_force or config.core.force:
+ if self._stop_if_running(argl) > 0:
+ utils.wait4dc(what="Stopping %s" % (", ".join(argl)))
+ cib_factory.ensure_cib_updated()
+ return cib_factory.delete(*argl)
+
+ @command.name('default-timeouts')
+ @command.alias('default_timeouts')
+ @command.completers_repeating(_id_list)
+ def do_default_timeouts(self, context, *args):
+ "usage: default-timeouts <id> [<id>...]"
+ cib_factory.ensure_cib_updated()
+ return cib_factory.default_timeouts(*args)
+
+ @command.skill_level('administrator')
+ @command.completers(_id_list)
+ def do_rename(self, context, old_id, new_id):
+ "usage: rename <old_id> <new_id>"
+ cib_factory.ensure_cib_updated()
+ return cib_factory.rename(old_id, new_id)
+
+ @command.skill_level('administrator')
+ @command.completers(compl.choice(['nodes']))
+ def do_erase(self, context, nodes=None):
+ "usage: erase [nodes]"
+ if not options.regression_tests:
+ logger.warning("`crm configure erase` is deprecated. The replacement could be `crm cluster remove [node]`")
+ return True
+ cib_factory.ensure_cib_updated()
+ if nodes is None:
+ return cib_factory.erase()
+ if nodes != 'nodes':
+ context.fatal_error("Expected 'nodes' (found '%s')" % (nodes))
+ return cib_factory.erase_nodes()
+
+ @command.skill_level('administrator')
+ def do_refresh(self, context):
+ "usage: refresh"
+ if options.interactive and cib_factory.has_cib_changed():
+ if not utils.ask("All changes will be dropped. Do you want to proceed?"):
+ return
+ cib_factory.refresh()
+
+ @command.alias('simulate')
+ @command.completers(compl.choice(['nograph']))
+ def do_ptest(self, context, *args):
+ "usage: ptest [nograph] [v...] [scores] [utilization] [actions]"
+ # use ptest/crm_simulate depending on which command was
+ # used
+ config.core.ptest = constants.simulate_programs[context.get_command_name()]
+ if not config.core.ptest:
+ return False
+ set_obj = mkset_obj("xml")
+ return ui_utils.ptestlike(set_obj.ptest, 'vv', context.get_command_name(), args)
+
+ def _commit(self, force=False, replace=False):
+ if not cib_factory.has_cib_changed():
+ logger.info("apparently there is nothing to commit")
+ logger.info("try changing something first")
+ return True
+ rc1 = True
+ if replace and not force:
+ rc1 = cib_factory.is_current_cib_equal()
+ rc2 = cib_factory.has_no_primitives() or \
+ self._verify(mkset_obj("xml", "changed"), mkset_obj("xml"))
+ if rc1 and rc2:
+ return cib_factory.commit(replace=replace)
+ if force or config.core.force:
+ logger.info("commit forced")
+ return cib_factory.commit(force=True, replace=replace)
+ if utils.ask("Do you still want to commit?"):
+ return cib_factory.commit(force=True, replace=replace)
+ return False
+
+ @command.skill_level('administrator')
+ @command.wait
+ @command.completers(compl.choice(['force', 'replace']), compl.choice(['force', 'replace']))
+ def do_commit(self, context, arg0=None, arg1=None):
+ "usage: commit [force] [replace]"
+ force = "force" in [arg0, arg1]
+ replace = "replace" in [arg0, arg1]
+ if arg0 is not None and arg0 not in ("force", "replace"):
+ logger_utils.syntax_err(('configure.commit', arg0))
+ return False
+ if arg1 is not None and arg1 not in ("force", "replace"):
+ logger_utils.syntax_err(('configure.commit', arg1))
+ return False
+ return self._commit(force=force, replace=replace)
+
+ @command.skill_level('administrator')
+ @command.completers(compl.choice(['force']))
+ def do_upgrade(self, context, force=None):
+ "usage: upgrade [force]"
+ if force and force != "force":
+ context.fatal_error("Expected 'force' or no argument")
+ cib_factory.ensure_cib_updated()
+ return cib_factory.upgrade_validate_with(force=config.core.force or force)
+
+ @command.skill_level('administrator')
+ def do_schema(self, context, schema_st=None):
+ "usage: schema [<schema>]"
+ if not schema_st:
+ print(cib_factory.get_schema())
+ return True
+ return cib_factory.change_schema(schema_st)
+
+ def __override_lower_level_attrs(self, *args):
+ """
+ When setting up an attribute of a cluster, the same
+ attribute may already exist in one of the nodes an/or
+ any resource.
+ The user should be informed about it and, if he wants,
+ he will have an option to delete the already existing
+ attribute.
+ """
+ if not args:
+ return
+
+ nvpair = args[0].split('=', 1)
+        if len(nvpair) != 2:
+ return
+
+ attr_name, attr_value = nvpair
+
+ if "maintenance-mode" == attr_name:
+ attr = "maintenance"
+ conflicting_lower_level_attr = 'is-managed'
+ # FIXME! the first argument is hardcoded
+ objs = get_resources_on_nodes(cib_factory.node_id_list(), [ "primitive", "group", "clone"])
+ remove_redundant_attrs(objs, "meta_attributes", attr, conflicting_lower_level_attr)
+
+ objs = get_resources_on_nodes(cib_factory.node_id_list(), [ "node" ])
+ remove_redundant_attrs(objs, "instance_attributes", attr, conflicting_lower_level_attr)
+
+ def __conf_object(self, cmd, *args):
+ "The configure object command."
+ if cmd in list(constants.cib_cli_map.values()) and \
+ not cib_factory.is_elem_supported(cmd):
+ logger.error("%s not supported by the RNG schema" % cmd)
+ return False
+ cib_factory.ensure_cib_updated()
+ if not args:
+ return cib_factory.create_object(cmd, *args)
+ if args[0].startswith("id="):
+ object_id = args[0][3:]
+ else:
+ object_id = args[0]
+ params = (object_id,) + args[1:]
+ return cib_factory.create_object(cmd, *params)
+
+ @command.skill_level('administrator')
+ @command.completers(_node_id_list, compl.choice(constants.node_attributes_keyw))
+ def do_node(self, context, *args):
+ """usage: node <uname>[:<type>]
+ [attributes <param>=<value> [<param>=<value>...]]
+ [utilization <param>=<value> [<param>=<value>...]]"""
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.attr_id, ra_classes_or_tmpl, primitive_complete_complex)
+ @command.alias('resource')
+ def do_primitive(self, context, *args):
+ """usage: primitive <rsc> {[<class>:[<provider>:]]<type>|@<template>}
+ [[params] <param>=<value> [<param>=<value>...]]
+ [meta <attribute>=<value> [<attribute>=<value>...]]
+ [utilization <attribute>=<value> [<attribute>=<value>...]]
+ [operations id_spec
+ [op op_type [<attribute>=<value>...]
+ [[op_params] <param>=<value> [<param>=<value>...]]
+ [op_meta <attribute>=<value> [<attribute>=<value>...]] ...]]"""
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.completers_repeating(compl.attr_id, _container_type, container_complete_complex)
+ def do_bundle(self, context, *args):
+ """usage: bundle <bundle id> <container type> [<container option>...]
+ network [<network option>...]
+ storage [<storage option>...]
+ primitive <resource id> {[<class>:[<provider>:]]<type>|@<template>}"""
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.attr_id, _group_completer)
+ def do_group(self, context, *args):
+ """usage: group <name> <rsc> [<rsc>...]
+ [params <param>=<value> [<param>=<value>...]]
+ [meta <attribute>=<value> [<attribute>=<value>...]]"""
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.attr_id, _f_children_id_list, _advanced_completer)
+ def do_clone(self, context, *args):
+ """usage: clone <name> <rsc>
+ [params <param>=<value> [<param>=<value>...]]
+ [meta <attribute>=<value> [<attribute>=<value>...]]"""
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.alias('master')
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.attr_id, _f_children_id_list, _advanced_completer)
+ def do_ms(self, context, *args):
+ """usage: ms <name> <rsc>
+ [params <param>=<value> [<param>=<value>...]]
+ [meta <attribute>=<value> [<attribute>=<value>...]]"""
+ format_str = " " if "meta" in args else " meta "
+ new_cmd_str = ' '.join(args) + "{}promotable=true".format(format_str)
+ logger.warning('"ms" is deprecated. Please use "clone {}"'.format(new_cmd_str))
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.attr_id, ui_ra.complete_class_provider_type,
+ primitive_complete_complex)
+ def do_rsc_template(self, context, *args):
+ """usage: rsc_template <name> [<class>:[<provider>:]]<type>
+ [params <param>=<value> [<param>=<value>...]]
+ [meta <attribute>=<value> [<attribute>=<value>...]]
+ [utilization <attribute>=<value> [<attribute>=<value>...]]
+ [operations id_spec
+ [op op_type [<attribute>=<value>...] ...]]"""
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('administrator')
+ @command.completers(compl.attr_id, _top_rsc_id_list)
+ def do_location(self, context, *args):
+ """usage: location <id> <rsc> {node_pref|rules}
+
+ node_pref :: <score>: <node>
+
+ rules ::
+ rule [id_spec] [$role=<role>] <score>: <expression>
+ [rule [id_spec] [$role=<role>] <score>: <expression> ...]
+
+ id_spec :: $id=<id> | $id-ref=<id>
+ score :: <number> | <attribute> | [-]inf
+ expression :: <simple_exp> [bool_op <simple_exp> ...]
+ bool_op :: or | and
+ simple_exp :: <attribute> [type:]<binary_op> <value>
+ | <unary_op> <attribute>
+ | date <date_expr>
+ type :: string | version | number
+ binary_op :: lt | gt | lte | gte | eq | ne
+ unary_op :: defined | not_defined"""
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.alias('collocation')
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.attr_id, compl.null, top_rsc_tmpl_id_list)
+ def do_colocation(self, context, *args):
+ """usage: colocation <id> <score>: <rsc>[:<role>] <rsc>[:<role>] ...
+ [node-attribute=<node_attr>]"""
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.attr_id,
+ compl.call(schema.rng_attr_values, 'rsc_order', 'kind'),
+ top_rsc_tmpl_id_list)
+ def do_order(self, context, *args):
+ """usage: order <id> [kind]: <rsc>[:<action>] <rsc>[:<action>] ...
+ [symmetrical=<bool>]"""
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.attr_id, compl.null, top_rsc_tmpl_id_list)
+ def do_rsc_ticket(self, context, *args):
+ """usage: rsc_ticket <id> <ticket_id>: <rsc>[:<role>] [<rsc>[:<role>] ...]
+ [loss-policy=<loss_policy_action>]"""
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(_property_completer)
+ def do_property(self, context, *args):
+ "usage: property [$id=<set_id>] <option>=<value>"
+ self.__override_lower_level_attrs(*args)
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(_prim_meta_completer)
+ def do_rsc_defaults(self, context, *args):
+ "usage: rsc_defaults [$id=<set_id>] <option>=<value>"
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(op_attr_list)
+ def do_op_defaults(self, context, *args):
+ "usage: op_defaults [$id=<set_id>] <option>=<value>"
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(node_id_colon_list, stonith_resource_list)
+ def do_fencing_topology(self, context, *args):
+ "usage: fencing_topology [<node>:] stonith_resources [stonith_resources ...]"
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('administrator')
+ def do_xml(self, context, *args):
+ "usage: xml <xml>"
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('administrator')
+ @command.completers(_f_children_id_list)
+ def do_monitor(self, context, *args):
+ "usage: monitor <rsc>[:<role>] <interval>[:<timeout>]"
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('expert')
+ @command.completers_repeating(compl.null, compl.choice(["role:", "read", "write", "deny"]))
+ def do_user(self, context, *args):
+ """user <uid> {roles|rules}
+
+ roles :: role:<role-ref> [role:<role-ref> ...]
+ rules :: rule [rule ...]
+
+ (See the role command for details on rules.)"""
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('expert')
+ @command.completers_repeating(compl.null, compl.choice(["read", "write", "deny"]))
+ def do_role(self, context, *args):
+ """role <role-id> rule [rule ...]
+
+ rule :: acl-right cib-spec [attribute:<attribute>]
+
+ acl-right :: read | write | deny
+
+ cib-spec :: xpath-spec | tag-ref-spec
+ xpath-spec :: xpath:<xpath> | shortcut
+ tag-ref-spec :: tag:<tag> | ref:<id> | tag:<tag> ref:<id>
+
+ shortcut :: meta:<rsc>[:<attr>]
+ params:<rsc>[:<attr>]
+ utilization:<rsc>
+ location:<rsc>
+ property[:<attr>]
+ node[:<node>]
+ nodeattr[:<attr>]
+ nodeutil[:<node>]
+ status"""
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('expert')
+ def do_acl_target(self, context, *args):
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.null, top_rsc_tmpl_id_list)
+ def do_tag(self, context, *args):
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('administrator')
+ def do_alert(self, context, *args):
+ return self.__conf_object(context.get_command_name(), *args)
+
+ @command.skill_level('expert')
+ @command.completers_repeating(_rsc_id_list)
+ def do_rsctest(self, context, *args):
+ "usage: rsctest <rsc_id> [<rsc_id> ...] [<node_id> ...]"
+ cib_factory.ensure_cib_updated()
+ rc = True
+ rsc_l = []
+ node_l = []
+ current = "r"
+ for ident in args:
+ el = cib_factory.find_object(ident)
+ if not el:
+ logger.error("element %s does not exist" % ident)
+ rc = False
+ elif current == "r" and xmlutil.is_resource(el.node):
+ if xmlutil.is_container(el.node):
+ rsc_l += el.node.findall("primitive")
+ else:
+ rsc_l.append(el.node)
+ elif xmlutil.is_normal_node(el.node):
+ current = "n"
+ node_l.append(el.node.get("uname"))
+ else:
+ logger_utils.syntax_err((context.get_command_name(), ident), context='rsctest')
+ return False
+ if not rc:
+ return False
+ if not rsc_l:
+ logger.error("specify at least one resource")
+ return False
+ all_nodes = cib_factory.node_id_list()
+ if not node_l:
+ node_l = all_nodes
+ return rsctest.test_resources(rsc_l, node_l, all_nodes)
+
+ def should_wait(self):
+ return cib_factory.has_cib_changed()
+
+ def end_game(self, no_questions_asked=False):
+ ok = True
+ if cib_factory.has_cib_changed():
+ if no_questions_asked or not options.interactive:
+ ok = self._commit()
+ elif utils.ask("There are changes pending. Do you want to commit them?"):
+ ok = self._commit()
+ cib_factory.reset()
+ return ok
diff --git a/crmsh/ui_context.py b/crmsh/ui_context.py
new file mode 100644
index 0000000..5e33379
--- /dev/null
+++ b/crmsh/ui_context.py
@@ -0,0 +1,413 @@
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import shlex
+import sys
+from . import config
+from . import utils
+from . import options
+from . import ui_utils
+from . import userdir
+from . import constants
+from . import log
+from . import main
+
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+
+
+class Context(object):
+ """
+ Context is a cursor that marks the current
+ location of the user in the UI hierarchy.
+ It maintains a stack of UILevel objects, so
+ level_stack[-1] is the current level.
+
+ The Context is passed as the first parameter
+ to any command.
+ """
+ def __init__(self, root):
+ self.stack = [root]
+ self._mark = 0
+ self._in_transit = False
+ self._wait_for_dc = False
+
+ # holds information about the currently
+ # executing command
+ self.command_name = None
+ self.command_args = None
+ self.command_info = None
+
+ # readline cache
+ self._rl_line = None
+ self._rl_words = []
+
+ def run(self, line):
+ '''
+ Execute the given command line.
+ '''
+ promptstr = "crm({}/{}){}# ".format(main.cib_prompt(), utils.this_node(), self.prompt())
+ logger_utils.log_only_to_file("{}{}".format(promptstr, line))
+
+ line = line.strip()
+ if not line or line.startswith('#'):
+ return True
+
+ line = _try_redirect_help_argument_to_subcommand(line)
+
+ self._mark = len(self.stack)
+ self._in_transit = False
+ self._wait_for_dc = False
+
+ rv = True
+ cmd = False
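+        # walk the tokens: each token naming a sublevel descends into it;
+        # the first token naming a command stops the walk and is executed
+        # with the remaining tokens as its arguments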
+ try:
+ tokens = shlex.split(line)
+ while tokens:
+ token, tokens = tokens[0], tokens[1:]
+ self.command_name = token
+ self.command_args = tokens
+ self.command_info = self.current_level().get_child(token)
+ if not self.command_info:
+ self.fatal_error("No such command")
+ if self.command_name in self.command_info.aliases and self.command_name not in ["-h", "--help"]:
+ logger.warning("This command '%s' is deprecated, please use '%s'", self.command_name, self.command_info.name)
+ if token != self.command_info.name:
+ logger.info("\"%s\" is accepted as \"%s\"", token, self.command_info.name)
+ self.command_name = self.command_info.name
+ if self.command_info.type == 'level':
+ self.enter_level(self.command_info.level)
+ else:
+ cmd = True
+ break
+ if cmd:
+ utils.check_user_access(self.current_level().name)
+ rv = self.execute_command() is not False
+ except (ValueError, IOError) as e:
+ logger.error("%s: %s", self.get_qualified_name(), e, exc_info=e)
+ rv = False
+ except utils.TerminateSubCommand:
+ return False
+ if cmd or (rv is False):
+ rv = self._back_out() and rv
+
+ # wait for dc if wait flag set
+ if self._wait_for_dc:
+ return utils.wait4dc(self.command_name, not options.batch)
+ return rv
+
+ def complete(self, line):
+ '''
+ Given a (partial) command line, returns
+ a list of potential completions.
+ A space at the end of the line is significant.
+ '''
+ complete_next = line.endswith(' ')
+ # if complete_next:
+ # print >>sys.stderr, "complete_next is on"
+
+ # copy current state
+ prev_stack = list(self.stack)
+ prev_name = self.command_name
+ prev_args = self.command_args
+ prev_info = self.command_info
+ try:
+ line = line.strip()
+ if not line or line.startswith('#'):
+ return self.current_level().get_completions()
+
+ try:
+ tokens = shlex.split(line)
+ if complete_next:
+ tokens += ['']
+ while tokens:
+ token, tokens = tokens[0], tokens[1:]
+ self.command_name = token
+ self.command_args = tokens
+ self.command_info = self.current_level().get_child(token)
+
+ if not self.command_info:
+ return self.current_level().get_completions()
+ if self.command_info.type == 'level':
+ self.enter_level(self.command_info.level)
+ else:
+ # use the completer for the command
+ ret = self.command_info.complete(self, tokens)
+ if tokens:
+ ret = [t for t in ret if t.startswith(tokens[-1])]
+
+ if not ret or self.command_info.aliases:
+                        if token not in self.current_level().get_completions():
+ return self.current_level().get_completions()
+ if self.command_name in self.command_info.aliases and not self.command_args:
+ return [self.command_name]
+ return ret
+ # reached the end on a valid level.
+ # return the completions for the previous level.
+ if self.previous_level():
+ return self.previous_level().get_completions()
+ # not sure this is the right thing to do
+ return self.current_level().get_completions()
+ except ValueError:
+ # logger.error("%s: %s" % (self.get_qualified_name(), msg))
+ pass
+ except IOError:
+ # logger.error("%s: %s" % (self.get_qualified_name(), msg))
+ pass
+ return []
+ finally:
+ # restore level stack
+ self.stack = prev_stack
+ self.command_name = prev_name
+ self.command_args = prev_args
+ self.command_info = prev_info
+
+ def setup_readline(self):
+ import readline
+ readline.set_history_length(100)
+ for v in ('tab: complete',
+ # 'set bell-style visible',
+ # 'set menu-complete-display-prefix on',
+ # 'set show-all-if-ambiguous on',
+ # 'set show-all-if-unmodified on',
+ 'set skip-completed-text on'):
+ readline.parse_and_bind(v)
+ readline.set_completer(self.readline_completer)
+ readline.set_completer_delims(' \t\n,')
+ try:
+ readline.read_history_file(userdir.HISTORY_FILE)
+ except IOError:
+ pass
+
+ def disable_completion(self):
+ import readline
+ readline.parse_and_bind('tab: complete')
+ readline.set_completer(self.disable_completer)
+
+ def disable_completer(self, text, state):
+ # complete nothing
+ return
+
+ def clear_readline_cache(self):
+ self._rl_line = None
+ self._rl_words = []
+
+ def readline_completer(self, text, state):
+ import readline
+
+ def matching(word):
+ 'we are only completing the last word in the line'
+ return word.split()[-1].startswith(text)
+
+ line = utils.get_line_buffer() + readline.get_line_buffer()
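+        # the full input is the part of the line already consumed plus
+        # readline's current buffer; completions are recomputed only when
+        # this combined line changes (cached in _rl_line/_rl_words)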
+ if line != self._rl_line:
+ try:
+ self._rl_line = line
+ completions = self.complete(line)
+ if text:
+ self._rl_words = [w for w in completions if matching(w) and not w.startswith("_")]
+ else:
+ self._rl_words = [w for w in completions if not w.startswith("_")]
+
+            except Exception:
+ self.clear_readline_cache()
+
+ try:
+ ret = self._rl_words[state]
+ except IndexError:
+ ret = None
+ # logging.debug("line:%s, text:%s, ret:%s, state:%s", repr(line), repr(text), ret, state)
+        if ret and (not text or line.split()[-1].endswith(ret)):
+ if ret == "id=":
+ return ret
+ return ret + ' '
+ return ret
+
+ def current_level(self):
+ return self.stack[-1]
+
+ def previous_level(self):
+ if len(self.stack) > 1:
+ return self.stack[-2]
+ return None
+
+ def enter_level(self, level):
+ '''
+ Pushes an instance of the given UILevel
+ subclass onto self.stack. Checks prerequirements
+ for the level (if any).
+ '''
+ # on entering new level we need to set the
+ # interactive option _before_ creating the level
+ if not options.interactive and not self.command_args:
+ self._set_interactive()
+
+ # not sure what this is all about
+ self._in_transit = True
+
+ entry = level()
+        if hasattr(entry, 'requires') and not entry.requires():
+ self.fatal_error("Missing requirements")
+ self.stack.append(entry)
+ self.clear_readline_cache()
+
+ def _set_interactive(self):
+ '''Set the interactive option only if we're on a tty.'''
+ if utils.can_ask():
+ options.interactive = True
+
+ def execute_command(self):
+ # build argument list
+ arglist = [self.current_level(), self] + self.command_args
+ # nskip = 2 to skip self and context when reporting errors
+ ui_utils.validate_arguments(self.command_info.function, arglist, nskip=2)
+ self.check_skill_level(self.command_info.skill_level)
+ rv = self.command_info.function(*arglist)
+
+ # should we wait till the command takes effect?
+ if self.should_wait():
+ self._wait_for_dc = True
+ return rv
+
+ def should_wait(self):
+ if not config.core.wait:
+ return False
+
+ if self.command_info.wait:
+ return True
+
+ by_level = self.current_level().should_wait()
+ transit_or_noninteractive = self.is_in_transit() or not options.interactive
+ return by_level and transit_or_noninteractive
+
+ def is_in_transit(self):
+ '''
+ TODO
+ FIXME
+ '''
+ return self._in_transit
+
+ def check_skill_level(self, skill_level):
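+        # skill_level is numeric (0=operator, 1=administrator, 2=expert);
+        # the configured core.skill_level string is mapped to a number and
+        # must be at least as high as the level the command requires.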
+ levels_to = {0: 'operator', 1: 'administrator', 2: 'expert'}
+ levels_from = {'operator': 0, 'administrator': 1, 'expert': 2}
+ if levels_from.get(config.core.skill_level, 0) < skill_level:
+ self.fatal_error("ACL %s skill level required" %
+ (levels_to.get(skill_level, 'other')))
+
+ def get_command_name(self):
+ "Returns name used to call the current command"
+ return self.command_name
+
+ def get_qualified_name(self):
+ "Returns level.command if level is not root"
+ names = '.'.join([l.name for l in self.stack[1:]])
+ if names:
+ return "%s.%s" % (names, self.get_command_name())
+ return self.get_command_name()
+
+ def get_command_info(self):
+ "Returns the ChildInfo object for the current command or level"
+ return self.command_info
+
+ def up(self):
+ '''
+ Navigate up in the levels hierarchy
+ '''
+ ok = True
+ if len(self.stack) > 1:
+ ok = self.current_level().end_game(no_questions_asked=self._in_transit) is not False
+ self.stack.pop()
+ self.clear_readline_cache()
+ return ok
+
+ def _back_out(self):
+ '''
+ Restore the stack to the marked position
+ '''
+ ok = True
+ while self._mark > 0 and len(self.stack) > self._mark:
+ ok = self.up() and ok
+ return ok
+
+ def save_stack(self):
+ self._mark = len(self.stack)
+
+ def quit(self, rc=0):
+ '''
+ Exit from the top level
+ '''
+ ok = self.current_level().end_game()
+ if options.interactive and not options.batch:
+ if constants.need_reset:
+ utils.ext_cmd("reset")
+ else:
+ print("bye")
+ if ok is False and rc == 0:
+ rc = 1
+ sys.exit(rc)
+
+ def level_name(self):
+ '''
+ Returns the name of the current level.
+ Returns 'root' if at the root level.
+ '''
+ return self.current_level().name
+
+ def prompt(self):
+ 'returns a prompt generated from the level stack'
+ return ' '.join(l.name for l in self.stack[1:])
+
+ def previous_level_is(self, level_name):
+ '''
+ Check call stack for previous level name
+ '''
+ prev = self.previous_level()
+ return prev and prev.name == level_name
+
+ def error(self, msg):
+ """
+ Too easy to misremember and type error()
+ when I meant fatal_error().
+ """
+ raise ValueError(msg)
+
+ def fatal_error(self, msg):
+ """
+ TODO: Better error messages, with full context information
+ Raise exception to get thrown out to run()
+ """
+ raise ValueError(msg)
+
+ def error_message(self, msg):
+ """
+ Error message only, don't cancel execution of command
+ """
+ logger.error("%s: %s", self.get_qualified_name(), msg)
+
+ def warning(self, msg):
+ logger.warning("%s: %s", self.get_qualified_name(), msg)
+
+ def info(self, msg):
+ logger.info("%s: %s", self.get_qualified_name(), msg)
+
+
+def _try_redirect_help_argument_to_subcommand(line):
+ tokens = shlex.split(line)
+ if "--help-without-redirect" in tokens:
+        # The help text of some subcommands is read from the `--help` output generated by argparse.
+        # Such calls must not be redirected, or a circular redirect would be formed.
+ # See crmsh.help._load_help.
+ return ' '.join(("--help" if token == "--help-without-redirect" else token for token in tokens))
+ elif tokens[-1] in ["-h", "--help"]:
+ if len(tokens) == 2 and tokens[0] == 'report':
+            # subcommand report implements --help itself, but _load_help does not load from it.
+ return line
+ else:
+ return 'help ' + ' '.join(tokens[:-1])
+ else:
+ return line
+
+
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/ui_corosync.py b/crmsh/ui_corosync.py
new file mode 100644
index 0000000..97c2073
--- /dev/null
+++ b/crmsh/ui_corosync.py
@@ -0,0 +1,174 @@
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import os
+from . import command, sh
+from . import completers
+from . import utils
+from . import corosync
+from . import parallax
+from . import bootstrap
+from . import log
+from .service_manager import ServiceManager
+
+logger = log.setup_logger(__name__)
+
+
+def _push_completer(args):
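+    # completer for push/pull targets: offer the other cluster nodes,
+    # dropping any already present on the command line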
+ try:
+ n = utils.list_cluster_nodes()
+ n.remove(utils.this_node())
+ if args[-1] in n:
+ # continue complete
+ return [args[-1]]
+ for item in args:
+ if item in n:
+ n.remove(item)
+ return n
+    except Exception:
+        return []
+
+
+def _diff_nodes(args):
+ try:
+ if len(args) > 3:
+ return []
+ n = utils.list_cluster_nodes()
+ if args[-1] in n:
+ # continue complete
+ return [args[-1]]
+ for item in args:
+ if item in n:
+ # remove already complete item
+ n.remove(item)
+ return n
+    except Exception:
+ return []
+
+
+class Corosync(command.UI):
+ '''
+ Corosync is the underlying messaging layer for most HA clusters.
+ This level provides commands for editing and managing the corosync
+ configuration.
+ '''
+ name = "corosync"
+
+ def requires(self):
+ return corosync.check_tools()
+
+ @command.completers(completers.choice(['ring', 'quorum', 'qdevice', 'qnetd']))
+ def do_status(self, context, status_type="ring"):
+ '''
+ Quick cluster health status. Corosync status or QNetd status
+ '''
+ if not ServiceManager(sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).service_is_active("corosync.service"):
+ logger.error("corosync.service is not running!")
+ return False
+
+ try:
+ corosync.query_status(status_type)
+ except ValueError as err:
+ logger.error(str(err))
+ return False
+
+ @command.skill_level('administrator')
+ def do_reload(self, context):
+ '''
+ Reload the corosync configuration
+ '''
+ return corosync.cfgtool('-R')[0] == 0
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(_push_completer)
+ def do_push(self, context, *nodes):
+ '''
+ Push corosync configuration to other cluster nodes.
+ If no nodes are provided, configuration is pushed to
+ all other cluster nodes.
+ '''
+ if not nodes:
+ nodes = utils.list_cluster_nodes()
+ nodes.remove(utils.this_node())
+ return corosync.push_configuration(nodes)
+
+ @command.skill_level('administrator')
+ @command.completers(_push_completer)
+ def do_pull(self, context, node):
+ '''
+ Pull corosync configuration from another node.
+ '''
+ return corosync.pull_configuration(node)
+
+ @command.completers_repeating(_diff_nodes)
+ def do_diff(self, context, *nodes):
+ '''
+ Compare corosync configuration between nodes.
+ '''
+ checksum = False
+ if nodes and nodes[0] == '--checksum':
+ checksum = True
+ nodes = nodes[1:]
+ if not nodes:
+ nodes = utils.list_cluster_nodes()
+ return corosync.diff_configuration(nodes, checksum=checksum)
+
+ @command.skill_level('administrator')
+ def do_edit(self, context):
+ '''
+ Edit the corosync configuration.
+ '''
+ cfg = corosync.conf()
+ try:
+ utils.edit_file_ext(cfg, template='')
+ except IOError as e:
+ context.fatal_error(str(e))
+
+ def do_show(self, context):
+ '''
+ Display the corosync configuration.
+ '''
+ cfg = corosync.conf()
+ if not os.path.isfile(cfg):
+ context.fatal_error("No corosync configuration found on this node.")
+ utils.page_string(open(cfg).read())
+
+ def do_log(self, context):
+ '''
+ Display the corosync log file (if any).
+ '''
+ logfile = corosync.get_value('logging.logfile')
+ if not logfile:
+ context.fatal_error("No corosync log file configured")
+ utils.page_file(logfile)
+
+ @command.name('add-node')
+ @command.alias('add_node')
+ @command.skill_level('administrator')
+ def do_addnode(self, context, addr, name=None):
+ "Add a node to the corosync nodelist"
+ corosync.add_node(addr, name)
+
+ @command.name('del-node')
+ @command.alias('del_node')
+ @command.skill_level('administrator')
+ @command.completers(_push_completer)
+ def do_delnode(self, context, name):
+ "Remove a node from the corosync nodelist"
+        transport = corosync.get_value('totem.transport')
+        if transport != "udpu":
+            context.fatal_error("Removing a node is only supported with the udpu transport")
+ corosync.del_node(name)
+
+ @command.skill_level('administrator')
+ @command.completers(completers.call(corosync.get_all_paths))
+ def do_get(self, context, path):
+ "Get a corosync configuration value"
+ for v in corosync.get_values(path):
+ print(v)
+
+ @command.skill_level('administrator')
+ @command.completers(completers.call(corosync.get_all_paths))
+ def do_set(self, context, path, value):
+ "Set a corosync configuration value"
+ corosync.set_value(path, value)
diff --git a/crmsh/ui_history.py b/crmsh/ui_history.py
new file mode 100644
index 0000000..6697282
--- /dev/null
+++ b/crmsh/ui_history.py
@@ -0,0 +1,642 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import os
+import sys
+import time
+import re
+import bz2
+from . import config
+from . import command
+from . import completers as compl
+from . import utils
+from . import ui_utils
+from . import xmlutil
+from . import options
+from .cibconfig import mkset_obj, cib_factory
+from .sh import ShellUtils
+from . import history
+from . import cmd_status
+from . import log
+
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+
+
+ptest_options = ["@v+", "nograph", "scores", "actions", "utilization"]
+
+
+@utils.memoize
+def crm_report():
+ return history.Report()
+
+
+class History(command.UI):
+ '''
+ The history class
+ '''
+ name = "history"
+
+ def __init__(self):
+ command.UI.__init__(self)
+ self.current_session = None
+ self._source_inited = False
+
+ def _init_source(self):
+ if self._source_inited:
+ return True
+ self._source_inited = True
+ return self._set_source(options.history)
+
+ def _set_period(self, from_time='', to_time=''):
+ '''
+ parse time specs and set period
+ '''
+ from_dt = to_dt = None
+ if from_time:
+ from_dt = utils.parse_time(from_time)
+ if not from_dt:
+ return False
+ if to_time:
+ to_dt = utils.parse_time(to_time)
+ if not to_dt:
+ return False
+ if to_dt and from_dt:
+ if to_dt < from_dt:
+ from_dt, to_dt = to_dt, from_dt
+ elif to_dt == from_dt:
+ logger.error("%s - %s: To and from dates cannot be the same", from_time, to_time)
+ return False
+ return crm_report().set_period(from_dt, to_dt)
+
+ def _set_source(self, src, live_from_time=None):
+ '''
+ Have the last history source survive the History
+ and Report instances
+ '''
+ def _check_source():
+ return (src == 'live') or os.path.isfile(src) or os.path.isdir(src)
+
+ logger.debug("setting source to %s", src)
+ if not _check_source():
+ if os.path.exists(crm_report().get_session_dir(src)):
+ logger.debug("Interpreting %s as session", src)
+ if crm_report().load_state(crm_report().get_session_dir(src)):
+ options.history = crm_report().get_source()
+ crm_report().prepare_source()
+ self.current_session = src
+ return True
+ else:
+ logger.error("source %s doesn't exist", src)
+ return False
+ crm_report().set_source(src)
+ options.history = src
+ self.current_session = None
+ to_time = ''
+ if src == "live":
+            from_time = time.ctime(live_from_time or (time.time() - 60*60))
+ else:
+ from_time = ''
+ return self._set_period(from_time, to_time)
+
+ @command.skill_level('administrator')
+ def do_source(self, context, src=None):
+ "usage: source {<dir>|<file>|live}"
+ if src is None:
+ print("Current source: %s" % (options.history))
+ return True
+ self._init_source()
+ if src != options.history:
+ return self._set_source(src)
+
+ @command.skill_level('administrator')
+ @command.alias('timeframe')
+ def do_limit(self, context, from_time='', to_time=''):
+ "usage: limit [<from_time> [<to_time>]]"
+ self._init_source()
+ if options.history == "live" and not from_time:
+ from_time = time.ctime(time.time() - 60*60)
+ return self._set_period(from_time, to_time)
+
+ @command.skill_level('administrator')
+ def do_refresh(self, context, force=''):
+ "usage: refresh"
+ self._init_source()
+ if force:
+ if force != "force" and force != "--force":
+ context.fatal_error("Expected 'force' or '--force' (was '%s')" % (force))
+ force = True
+ return crm_report().refresh_source(force)
+
+ @command.skill_level('administrator')
+ def do_detail(self, context, detail_lvl):
+ "usage: detail <detail_level>"
+ self._init_source()
+ detail_num = utils.convert2ints(detail_lvl)
+ if detail_num is None or detail_num not in (0, 1):
+ context.fatal_error("Expected '0' or '1' (was '%s')" % (detail_lvl))
+ return crm_report().set_detail(detail_lvl)
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.call(lambda: crm_report().node_list()))
+ def do_setnodes(self, context, *args):
+ "usage: setnodes <node> [<node> ...]"
+ self._init_source()
+ if options.history != "live":
+ logger.info("setting nodes not necessary for existing reports, proceeding anyway")
+ return crm_report().set_nodes(*args)
+
+ @command.skill_level('administrator')
+ def do_info(self, context):
+ "usage: info"
+ self._init_source()
+ return crm_report().info()
+
+ @command.skill_level('administrator')
+ def do_latest(self, context):
+ "usage: latest"
+ self._init_source()
+ if not utils.wait4dc("transition", not options.batch):
+ return False
+ self._set_source("live")
+ crm_report().refresh_source()
+ f = self._get_pe_byidx(-1)
+ if not f:
+ return False
+ crm_report().show_transition_log(f)
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.call(lambda: crm_report().rsc_list()))
+ def do_resource(self, context, *args):
+ "usage: resource <rsc> [<rsc> ...]"
+ self._init_source()
+ return crm_report().resource(*args)
+
+ @command.skill_level('administrator')
+ @command.wait
+ @command.completers_repeating(compl.call(lambda: crm_report().node_list()))
+ def do_node(self, context, *args):
+ "usage: node <node> [<node> ...]"
+ self._init_source()
+ return crm_report().node(*args)
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.call(lambda: crm_report().node_list()))
+ def do_log(self, context, *args):
+ "usage: log [<node> ...]"
+ self._init_source()
+ return crm_report().show_log(*args)
+
+ def ptest(self, nograph, scores, utilization, actions, verbosity):
+ 'Send a decompressed self.pe_file to ptest'
+ try:
+            with open(self.pe_file, "rb") as f:
+                bits = bz2.decompress(f.read())
+ except IOError as msg:
+ logger.error("open: %s", msg)
+ return False
+ return utils.run_ptest(bits, nograph, scores, utilization, actions, verbosity)
+
+ @command.skill_level('administrator')
+ def do_events(self, context):
+ "usage: events"
+ self._init_source()
+ return crm_report().events()
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.join(compl.call(lambda: crm_report().peinputs_list()),
+ compl.choice(['v'])))
+ def do_peinputs(self, context, *args):
+ """usage: peinputs [{<range>|<number>} ...] [v]"""
+ self._init_source()
+ argl = list(args)
+ opt_l = utils.fetch_opts(argl, ["v"])
+ if argl:
+ l = []
+ for s in argl:
+ a = utils.convert2ints(s.split(':'))
+ if a and len(a) == 2 and not utils.check_range(a):
+ logger.error("%s: invalid peinputs range", a)
+ return False
+ l += crm_report().pelist(a, verbose=("v" in opt_l))
+ else:
+ l = crm_report().pelist(verbose=("v" in opt_l))
+ if not l:
+ return False
+ s = '\n'.join(l)
+ utils.page_string(s)
+
+ def _get_pe_byname(self, s):
+ l = crm_report().find_pe_files(s)
+ if len(l) == 0:
+ logger.error("%s: path not found", s)
+ return None
+ elif len(l) > 1:
+ logger.error("%s: path ambiguous", s)
+ return None
+ return l[0]
+
+ def _get_pe_byidx(self, idx):
+ l = crm_report().pelist()
+ if len(l) < abs(idx):
+ if idx == -1:
+ logger.error("no transitions found in the source")
+ else:
+ logger.error("PE input file for index %d not found", (idx+1))
+ return None
+ return l[idx]
+
+ def _get_pe_bynum(self, n):
+ l = crm_report().pelist([n])
+ if len(l) == 0:
+ logger.error("PE file %d not found", n)
+ return None
+ elif len(l) > 1:
+ logger.error("PE file %d ambiguous", n)
+ return None
+ return l[0]
+
+ def _get_pe_input(self, pe_spec):
+ '''Get PE input file from the <number>|<index>|<file>
+ spec.'''
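+        # a spec containing "pe-" is matched against PE file names;
+        # non-positive integers index transitions from the end (0 = latest),
+        # positive integers select a PE input by number, and anything else
+        # falls back to the latest transition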
+ if re.search('pe-', pe_spec):
+ f = self._get_pe_byname(pe_spec)
+ elif utils.is_int(pe_spec):
+ n = int(pe_spec)
+ if n <= 0:
+ f = self._get_pe_byidx(n-1)
+ else:
+ f = self._get_pe_bynum(n)
+ else:
+ f = self._get_pe_byidx(-1)
+ return f
+
+ def _show_pe(self, f, opt_l):
+ self.pe_file = f # self.pe_file needed by self.ptest
+ ui_utils.ptestlike(self.ptest, 'vv', "transition", opt_l)
+ return crm_report().show_transition_log(f)
+
+ def _display_dot(self, f):
+ if not config.core.dotty:
+ logger.error("install graphviz to draw transition graphs")
+ return False
+ f = crm_report().pe2dot(f)
+ if not f:
+ logger.error("dot file not found in the report")
+ return False
+ utils.show_dot_graph(f, keep_file=True, desc="configuration graph")
+ return True
+
+ def _pe2shadow(self, f, argl):
+ try:
+ name = argl[0]
+        except IndexError:
+ name = os.path.basename(f).replace(".bz2", "")
+ logger.info("transition %s saved to shadow %s", f, name)
+ return xmlutil.pe2shadow(f, name)
+
+ @command.skill_level('administrator')
+ def do_transitions(self, context):
+ self._init_source()
+ s = '\n'.join(crm_report().show_transitions())
+ utils.page_string(s)
+
+ @command.skill_level('administrator')
+ @command.completers(compl.join(compl.call(lambda: crm_report().peinputs_list()),
+                                   compl.choice(['log', 'showdot', 'save', 'tags'])))
+ def do_transition(self, context, *args):
+ """usage: transition [<number>|<index>|<file>] [nograph] [v...] [scores] [actions] [utilization]
+ transition showdot [<number>|<index>|<file>]
+ transition log [<number>|<index>|<file>]
+        transition save [<number>|<index>|<file> [name]]
+        transition tags [<number>|<index>|<file>]"""
+ self._init_source()
+ argl = list(args)
+ subcmd = "show"
+ if argl and argl[0] in ("showdot", "log", "save", "tags"):
+ subcmd = argl[0]
+ del argl[0]
+ if subcmd == "show":
+ opt_l = utils.fetch_opts(argl, ptest_options)
+ if argl:
+ f = self._get_pe_input(argl[0])
+ del argl[0]
+ else:
+ f = self._get_pe_byidx(-1)
+ if (subcmd == "save" and len(argl) > 1) or \
+ (subcmd in ("show", "showdot", "log") and argl):
+ logger_utils.syntax_err(args, context="transition")
+ return False
+ if not f:
+ return False
+ if subcmd == "show":
+ logger.info("running ptest with %s", f)
+ rc = self._show_pe(f, opt_l)
+ elif subcmd == "showdot":
+ rc = self._display_dot(f)
+ elif subcmd == "save":
+ rc = self._pe2shadow(f, argl)
+ elif subcmd == "tags":
+ tags = crm_report().get_transition_tags(f)
+ rc = tags is not None
+ if rc:
+ print(' '.join(tags) if len(tags) else "No tags.")
+ else:
+ rc = crm_report().show_transition_log(f, True)
+ return rc
+
+ def _save_cib_env(self):
+        self._cib_f_save = os.environ.get("CIB_file")
+
+ def _reset_cib_env(self):
+ if self._cib_f_save:
+ os.environ["CIB_file"] = self._cib_f_save
+ else:
+            os.environ.pop("CIB_file", None)
+
+ def _setup_cib_env(self, pe_f):
+ '''Setup the CIB_file environment variable.
+ Alternatively, we could (or should) use shadows, but the
+ file/shadow management would be a bit involved.'''
+ if pe_f != "live":
+ os.environ["CIB_file"] = pe_f
+ else:
+ self._reset_cib_env()
+
+ def _pe_config_obj(self, pe_f):
+ '''Return set_obj of the configuration. It can later be
+ rendered using the repr() method.'''
+ self._setup_cib_env(pe_f)
+ if not cib_factory.refresh():
+ set_obj = mkset_obj("NOOBJ")
+ else:
+ set_obj = mkset_obj()
+ return set_obj
+
+ def _pe_config_noclr(self, pe_f):
+ '''Configuration with no formatting (no colors).'''
+ return self._pe_config_obj(pe_f).repr_nopretty()
+
+ def _pe_config_plain(self, pe_f):
+ '''Configuration with no formatting (but with colors).'''
+ return self._pe_config_obj(pe_f).repr(format_mode=0)
+
+ def _pe_config(self, pe_f):
+ '''Formatted configuration.'''
+ return self._pe_config_obj(pe_f).repr()
+
+ def _pe_status(self, pe_f):
+ '''Return status as a string.'''
+ self._setup_cib_env(pe_f)
+ rc, s = cmd_status.crm_mon()
+ if rc != 0:
+ if s:
+ logger.error("crm_mon exited with code %d and said: %s", rc, s)
+ else:
+ logger.error("crm_mon exited with code %d", rc)
+ return None
+ return s
+
+ def _pe_status_nohdr(self, pe_f):
+ '''Return status (without header) as a string.'''
+ self._setup_cib_env(pe_f)
+ rc, s = cmd_status.crm_mon()
+ if rc != 0:
+ logger.error("crm_mon exited with code %d and said: %s", rc, s)
+ return None
+ l = s.split('\n')
+ while l and l[0] != "":
+ l = l[1:]
+ while l and l[0] == "":
+ l = l[1:]
+ return '\n'.join(l)
+
+ def _get_diff_pe_input(self, t):
+ if t != "live":
+ return self._get_pe_input(t)
+ if not utils.get_dc():
+ logger.error("cluster not running")
+ return None
+ return "live"
+
+ def _render_pe(self, pe_fun, t):
+ pe_f = self._get_diff_pe_input(t)
+ if not pe_f:
+ return None
+ self._save_cib_env()
+ s = pe_fun(pe_f)
+ self._reset_cib_env()
+ return s
+
+ def _diff(self, pe_fun, t1, t2, html=False, wdiff=False):
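+        # Render both PE inputs with pe_fun and diff the two renderings:
+        # as an HTML side-by-side table, via the external `wdiff` tool, or
+        # with plain `diff -U 0`, labeling the output with the two specs.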
+ def _diff_impl(s1, s2, cmd):
+ s = None
+ f1 = utils.str2tmp(s1)
+ f2 = utils.str2tmp(s2)
+ try:
+ if f1 and f2:
+ _, s = ShellUtils().get_stdout(cmd.format(f1=f1, f2=f2))
+ finally:
+ for f in (f1, f2):
+ try:
+ os.unlink(f)
+ except os.error:
+ pass
+ return s
+
+ def _diffhtml(s1, s2, t1, t2):
+ import difflib
+ return ''.join(difflib.HtmlDiff(tabsize=2, wrapcolumn=120).make_table(s1.split('\n'), s2.split('\n'), t1, t2)).replace('&nbsp;&nbsp;', '&nbsp;')
+
+ s1 = self._render_pe(pe_fun, t1)
+ s2 = self._render_pe(pe_fun, t2)
+ if not s1 or not s2:
+ return None
+ if html:
+ s = _diffhtml(s1, s2, t1, t2)
+ elif wdiff:
+ s = _diff_impl(s1, s2, "wdiff {f1} {f2}")
+ else:
+ s = _diff_impl(s1, s2, "diff -U 0 -d -b --label %s --label %s {f1} {f2}" % (t1, t2))
+ return s
+
+ def _common_pe_render_check(self, context, opt_l, *args):
+ if context.previous_level_is("cibconfig") and cib_factory.has_cib_changed():
+ logger.error("please try again after committing CIB changes")
+ return False
+ argl = list(args)
+ supported_l = ["status"]
+ if context.get_command_name() == "diff":
+ supported_l.append("html")
+ opt_l += utils.fetch_opts(argl, supported_l)
+ if argl:
+ logger_utils.syntax_err(' '.join(argl), context=context.get_command_name())
+ return False
+ return True
+
+ @command.skill_level('administrator')
+ @command.name('_dump')
+ def do_dump(self, context, t, *args):
+ '''dump configuration or status to a file and print file
+ name.
+ NB: The configuration is color rendered, but note that
+ that depends on the current value of the TERM variable.
+ '''
+ self._init_source()
+ opt_l = []
+ if not self._common_pe_render_check(context, opt_l, *args):
+ return False
+ if "status" in opt_l:
+ s = self._render_pe(self._pe_status_nohdr, t)
+ else:
+ s = utils.term_render(self._render_pe(self._pe_config_plain, t))
+ if context.previous_level_is("cibconfig"):
+ cib_factory.refresh()
+ if not s:
+ return False
+ print(utils.str2tmp(s))
+
+ @command.skill_level('administrator')
+ @command.completers(compl.join(compl.call(lambda: crm_report().peinputs_list()),
+ compl.choice(['live'])),
+ compl.choice(['status']))
+ def do_show(self, context, t, *args):
+ "usage: show <pe> [status]"
+ self._init_source()
+ opt_l = []
+ if not self._common_pe_render_check(context, opt_l, *args):
+ return False
+ showfun = self._pe_config
+ if "status" in opt_l:
+ showfun = self._pe_status
+ s = self._render_pe(showfun, t)
+ if context.previous_level_is("cibconfig"):
+ cib_factory.refresh()
+ if not s:
+ return False
+ utils.page_string(s)
+
+ @command.skill_level('administrator')
+ @command.completers(compl.join(compl.call(lambda: crm_report().peinputs_list()),
+ compl.choice(['live'])))
+ def do_graph(self, context, t, *args):
+ "usage: graph <pe> [<gtype> [<file> [<img_format>]]]"
+ self._init_source()
+ pe_f = self._get_diff_pe_input(t)
+ if not pe_f:
+ return False
+ set_obj = self._pe_config_obj(pe_f)
+ rc = set_obj.query_graph(*args)
+ if rc is None:
+ return False
+ if context.previous_level_is("cibconfig"):
+ cib_factory.refresh()
+ return rc
+
+ @command.skill_level('administrator')
+ @command.completers(compl.join(compl.call(lambda: crm_report().peinputs_list()),
+ compl.choice(['live'])),
+ compl.join(compl.call(lambda: crm_report().peinputs_list()),
+ compl.choice(['live'])))
+ def do_diff(self, context, t1, t2, *args):
+ "usage: diff <pe> <pe> [status] [html]"
+ self._init_source()
+ opt_l = []
+ if not self._common_pe_render_check(context, opt_l, *args):
+ return False
+ showfun = self._pe_config_plain
+ mkhtml = "html" in opt_l
+ if "status" in opt_l:
+ showfun = self._pe_status_nohdr
+ elif mkhtml:
+ showfun = self._pe_config_noclr
+ s = self._diff(showfun, t1, t2, html=mkhtml)
+ if context.previous_level_is("cibconfig"):
+ cib_factory.refresh()
+ if s is None:
+ return False
+ if not mkhtml:
+ utils.page_string(s)
+ else:
+ sys.stdout.writelines(s)
+
+ @command.skill_level('administrator')
+ @command.completers(compl.join(compl.call(lambda: crm_report().peinputs_list()),
+ compl.choice(['live'])),
+ compl.join(compl.call(lambda: crm_report().peinputs_list()),
+ compl.choice(['live'])))
+ def do_wdiff(self, context, t1, t2, *args):
+ "usage: wdiff <pe> <pe> [status]"
+ self._init_source()
+ opt_l = []
+ if not self._common_pe_render_check(context, opt_l, *args):
+ return False
+ showfun = self._pe_config_plain
+ if "status" in opt_l:
+ showfun = self._pe_status_nohdr
+ s = self._diff(showfun, t1, t2, wdiff=True)
+ if context.previous_level_is("cibconfig"):
+ cib_factory.refresh()
+ if s is None:
+ return False
+ utils.page_string(s)
+
+ @command.skill_level('administrator')
+ @command.completers(compl.call(lambda: crm_report().session_subcmd_list()),
+ compl.call(lambda: crm_report().session_list()))
+ def do_session(self, context, subcmd=None, name=None):
+ "usage: session [{save|load|delete} <name> | pack [<name>] | update | list]"
+ self._init_source()
+ if not subcmd:
+ print("current session: %s" % self.current_session)
+ return True
+ # verify arguments
+ if subcmd not in ("save", "load", "pack", "delete", "list", "update"):
+ logger.error("unknown history session subcmd: %s", subcmd)
+ return False
+ if name:
+ if subcmd not in ("save", "load", "pack", "delete"):
+ logger_utils.syntax_err(subcmd, context='session')
+ return False
+ if not utils.is_filename_sane(name):
+ return False
+ elif subcmd not in ("list", "update", "pack"):
+ logger_utils.syntax_err(subcmd, context='session')
+ return False
+ elif subcmd in ("update", "pack") and not self.current_session:
+ logger.error("need to load a history session before update/pack")
+ return False
+ # do work
+ if not name:
+ # some commands work on the existing session
+ name = self.current_session
+ rc = crm_report().manage_session(subcmd, name)
+ # set source appropriately
+ if rc and subcmd in ("save", "load"):
+ options.history = crm_report().get_source()
+ crm_report().prepare_source()
+ self.current_session = name
+ elif rc and subcmd == "delete":
+ if name == self.current_session:
+ logger.info("current history session deleted, setting source to live")
+ self._set_source("live")
+ return rc
+
+ @command.skill_level('administrator')
+ @command.completers(compl.choice(['clear']))
+ def do_exclude(self, context, arg=None):
+ "usage: exclude [<regex>|clear]"
+ self._init_source()
+ if not arg:
+ return crm_report().manage_excludes("show")
+ elif arg == "clear":
+ return crm_report().manage_excludes("clear")
+ return crm_report().manage_excludes("add", arg)
+
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/ui_maintenance.py b/crmsh/ui_maintenance.py
new file mode 100644
index 0000000..e0ad5de
--- /dev/null
+++ b/crmsh/ui_maintenance.py
@@ -0,0 +1,96 @@
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+from . import command
+from . import completers as compl
+from . import config
+from .cibconfig import cib_factory
+from . import utils
+from . import xmlutil
+
+_compl_actions = compl.choice(['start', 'stop', 'monitor', 'meta-data', 'validate-all',
+ 'promote', 'demote', 'notify', 'reload', 'migrate_from',
+ 'migrate_to', 'recover'])
+
+
+class Maintenance(command.UI):
+ '''
+ Commands that should only be run while in
+ maintenance mode.
+ '''
+ name = "maintenance"
+
+ rsc_maintenance = "crm_resource -r '%s' --meta -p maintenance -v '%s'"
+
+ def __init__(self):
+ command.UI.__init__(self)
+
+ def requires(self):
+ return cib_factory.initialize()
+
+ def _onoff(self, resource, onoff):
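+        # With a resource argument, toggle that resource's "maintenance"
+        # meta attribute via crm_resource; without one, flip the cluster-wide
+        # maintenance-mode property and commit the change.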
+ if resource is not None:
+ return utils.ext_cmd(self.rsc_maintenance % (resource, onoff)) == 0
+ else:
+ cib_factory.create_object('property', 'maintenance-mode=%s' % (onoff))
+ return cib_factory.commit()
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.call(cib_factory.rsc_id_list))
+ def do_on(self, context, resource=None):
+ '''
+ Enable maintenance mode (for the optional resource or for everything)
+ '''
+ return self._onoff(resource, 'true')
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.call(cib_factory.rsc_id_list))
+ def do_off(self, context, resource=None):
+ '''
+ Disable maintenance mode (for the optional resource or for everything)
+ '''
+ return self._onoff(resource, 'false')
+
+ def _in_maintenance_mode(self, obj):
+ if cib_factory.get_property("maintenance-mode") == "true":
+ return True
+ v = obj.meta_attributes("maintenance")
+ return v and all(x == 'true' for x in v)
+
+ def _runs_on_this_node(self, resource):
+ nodes = utils.running_on(resource)
+ return set(nodes) == set([utils.this_node()])
+
+ @command.skill_level('administrator')
+ @command.completers(compl.call(cib_factory.rsc_id_list), _compl_actions, compl.choice(["ssh"]))
+ def do_action(self, context, resource, action, ssh=None):
+ '''
+ Issue action out-of-band to the given resource, making
+ sure that the resource is in maintenance mode first
+ '''
+ obj = cib_factory.find_object(resource)
+ if not obj:
+ context.fatal_error("Resource not found: %s" % (resource))
+ if not xmlutil.is_resource(obj.node):
+ context.fatal_error("Not a resource: %s" % (resource))
+ if not config.core.force and not self._in_maintenance_mode(obj):
+ context.fatal_error("Not in maintenance mode.")
+
+ if ssh is None:
+ if action not in ('start', 'monitor'):
+ if not self._runs_on_this_node(resource):
+ context.fatal_error("Resource %s must be running on this node (%s)" %
+ (resource, utils.this_node()))
+
+ from . import rsctest
+ return rsctest.call_resource(obj.node, action, [utils.this_node()], local_only=True)
+ elif ssh == "ssh":
+ from . import rsctest
+ if action in ('start', 'promote', 'demote', 'recover', 'meta-data'):
+ return rsctest.call_resource(obj.node, action,
+ [utils.this_node()], local_only=True)
+ else:
+ all_nodes = cib_factory.node_id_list()
+ return rsctest.call_resource(obj.node, action, all_nodes, local_only=False)
+ else:
+ context.fatal_error("Unknown argument: %s" % (ssh))
diff --git a/crmsh/ui_node.py b/crmsh/ui_node.py
new file mode 100644
index 0000000..e842dfc
--- /dev/null
+++ b/crmsh/ui_node.py
@@ -0,0 +1,620 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import re
+import copy
+import subprocess
+from argparse import ArgumentParser, RawDescriptionHelpFormatter
+
+from . import config
+from . import command
+from . import completers as compl
+from . import constants
+from . import ui_utils
+from . import utils
+from . import xmlutil
+from .cliformat import cli_nvpairs, nvpairs2list
+from . import term
+from .cibconfig import cib_factory
+from .sh import ShellUtils
+from . import log
+
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+
+
+def remove_redundant_attrs(objs, attributes_tag, attr, conflicting_attr=None):
+    """
+    Remove attr (and optionally conflicting_attr) from the given
+    attribute sets of all objects in the CIB
+    """
+    # By default, show the object's id when prompting the user.
+    # Node ids are plain integers, so show the uname field instead.
+    field2show = "id"
+    if "instance_attributes" == attributes_tag:
+        field2show = "uname"
+    # Walk the objects and remove the attribute from each attribute set
+ for r in objs:
+ for meta_set in xmlutil.get_set_nodes(r, attributes_tag, create=False):
+ a = xmlutil.get_attr_in_set(meta_set, attr)
+ if a is not None and \
+ (config.core.manage_children == "always" or \
+ (config.core.manage_children == "ask" and
+ utils.ask("'%s' attribute already exists in %s. Remove it?" %
+ (attr, r.get(field2show))))):
+ logger.debug("force remove meta attr %s from %s", attr, r.get(field2show))
+ xmlutil.rmnode(a)
+ xmlutil.xml_processnodes(r, xmlutil.is_emptynvpairs, xmlutil.rmnodes)
+ if conflicting_attr is not None:
+ a = xmlutil.get_attr_in_set(meta_set, conflicting_attr)
+ if a is not None and \
+ (config.core.manage_children == "always" or \
+ (config.core.manage_children == "ask" and
+ utils.ask("'%s' conflicts with '%s' in %s. Remove it?" %
+ (conflicting_attr, attr, r.get(field2show))))):
+ logger.debug("force remove meta attr %s from %s", conflicting_attr, r.get(field2show))
+ xmlutil.rmnode(a)
+ xmlutil.xml_processnodes(r, xmlutil.is_emptynvpairs, xmlutil.rmnodes)
+
+def get_resources_on_nodes(nodes, resources_tags):
+ prefix = "cli-prefer-"
+    exclude = [str(x.node.get("id")).replace(prefix, "") for x in cib_factory.cib_objects
+ if x.obj_type == "location" and x.node.get("node") not in nodes]
+
+ resources = [x.node for x in cib_factory.cib_objects
+ if x.obj_type in resources_tags and x.obj_id not in exclude]
+ return resources
+
+def update_xml_node(cluster_node_name, attr, value):
+    '''
+    Set xml_node.attr := value.
+
+    In addition, ask the user whether to
+    1) remove both attr and its conflicting attribute
+       from primitives, groups and clones
+    2) remove the conflicting attribute from the node itself
+    '''
+
+ node_obj = cib_factory.find_node(cluster_node_name)
+ if node_obj is None:
+ logger.error("CIB is not valid!")
+ return False
+
+ logger.debug("update_xml_node: %s", node_obj.obj_id)
+
+ xml_node = node_obj.node
+ node_obj.set_updated()
+
+ conflicting_attr = ''
+ if 'maintenance' == attr:
+ conflicting_attr = 'is-managed'
+ if 'is-managed' == attr:
+ conflicting_attr = 'maintenance'
+
+ # Get all primitive, group and clone resources currently running on the cluster_node_name
+    objs = get_resources_on_nodes([cluster_node_name], ["primitive", "group", "clone"])
+
+ # Ask the user to remove the 'attr' attributes on those primitives, groups and clones
+ remove_redundant_attrs(objs, "meta_attributes", attr, conflicting_attr)
+
+ # Remove the node conflicting attribute
+ nvpairs = xml_node.xpath("./instance_attributes/nvpair[@name='%s']" % (conflicting_attr))
+ if len(nvpairs) > 0 and \
+ utils.ask("'%s' conflicts with '%s' in %s. Remove it?" %
+ (conflicting_attr, attr, xml_node.get("uname"))):
+ for nvpair in nvpairs:
+ xmlutil.rmnode(nvpair)
+ xmlutil.xml_processnodes(xml_node, xmlutil.is_emptynvpairs, xmlutil.rmnodes)
+
+ # Set the node attribute
+ nvpairs = xml_node.xpath("./instance_attributes/nvpair[@name='%s']" % (attr))
+ if len(nvpairs) > 0:
+ for nvpair in nvpairs:
+ nvpair.set("value", value)
+ else:
+ for n in xmlutil.get_set_instace_attributes(xml_node, create=True):
+ xmlutil.set_attr(n, attr, value)
+ return True
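+# Illustrative (node name is hypothetical): update_xml_node("node1", "maintenance", "true")
+# sets the "maintenance" nvpair in node1's instance_attributes, after offering to
+# remove any conflicting "is-managed" attributes from the node and its resources.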
+
+def set_node_attr(cluster_node_name, attr_name, value, commit=True):
+ """
+ Set an attribute for a node
+ """
+ if not update_xml_node(cluster_node_name, attr_name, value):
+ logger.error("Failed to update node attributes for %s", cluster_node_name)
+ return False
+
+ if not commit:
+ return True
+
+ if not cib_factory.commit():
+ logger.error("Failed to commit updates to %s", cluster_node_name)
+ return False
+ return True
+
+def _oneline(s):
+ 'join s into a single line of space-separated tokens'
+ return ' '.join(l.strip() for l in s.splitlines())
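+# Illustrative example: _oneline("a\n   b\n c") == "a b c"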
+
+
+def unpack_node_xmldata(node, is_offline):
+ """
+ takes an XML element defining a node, and
+ returns the data to pass to print_node
+ is_offline: true|false
+ """
+ typ = uname = ident = ""
+ inst_attr = []
+ other = {}
+ for attr in list(node.keys()):
+ v = node.get(attr)
+ if attr == "type":
+ typ = v
+ elif attr == "uname":
+ uname = v
+ elif attr == "id":
+ ident = v
+ else:
+ other[attr] = v
+ inst_attr = [cli_nvpairs(nvpairs2list(elem))
+ for elem in node.xpath('./instance_attributes')]
+ return uname, ident, typ, other, inst_attr, is_offline
+
+
+def _find_attr(args):
+ """
+ complete utilization/attribute/status-attr attrs
+ """
+    if len(args) < 2:
+ return []
+ cib = xmlutil.cibdump2elem()
+ if cib is None:
+ return []
+
+    res = []
+    if args[0] == "utilization":
+        xpath = "//nodes/node[@uname='%s']/utilization/nvpair" % args[1]
+    elif args[0] == "attribute":
+        xpath = "//nodes/node[@uname='%s']/instance_attributes/nvpair" % args[1]
+    elif args[0] == "status-attr":
+        xpath = ("//status/node_state[@uname='%s']/"
+                 "transient_attributes/instance_attributes/nvpair") % args[1]
+    else:
+        return []
+    node_attr = cib.xpath(xpath)
+    for item in node_attr:
+        res.append(item.get("name"))
+    return res
+
+
+def print_node(uname, ident, node_type, other, inst_attr, offline):
+ """
+    Try to pretty-print a node from the CIB. Something like:
+ uname(id): node_type
+ attr1=v1
+ attr2=v2
+ """
+    s_offline = "(offline)" if offline else ""
+ if not node_type:
+ node_type = "member"
+ if uname == ident:
+ print(term.render("%s: %s%s" % (uname, node_type, s_offline)))
+ else:
+ print(term.render("%s(%s): %s%s" % (uname, ident, node_type, s_offline)))
+ for a in other:
+ print(term.render("\t%s: %s" % (a, other[a])))
+ for s in inst_attr:
+ print(term.render("\t%s" % (s)))
+
+
+def parse_option_for_nodes(context, *args):
+ """
+ Parse option for nodes
+ Return a node list
+ """
+ action_type = context.get_command_name()
+ action_target = "node" if action_type in ["standby", "online"] else "cluster service"
+ action = "{} {}".format(action_type, action_target)
+ usage_template = """
+Specify node(s) on which to {action}.
+If no nodes are specified, {action} on the local node.
+If --all is specified, {action} on all nodes."""
+ addtion_usage = ""
+ if action_type == "standby":
+ usage_template += """
+\n\nAdditionally, you may specify a lifetime for the standby---if set to
+"reboot", the node will be back online once it reboots. "forever" will
+keep the node in standby after reboot. The life time defaults to
+"forever"."""
+ addtion_usage = " [lifetime]"
+
+ parser = ArgumentParser(description=usage_template.format(action=action),
+ usage="{} [--all | <node>... ]{}".format(action_type, addtion_usage),
+ add_help=False,
+ formatter_class=RawDescriptionHelpFormatter)
+ parser.add_argument("-h", "--help", action="store_true", dest="help", help="Show this help message")
+ parser.add_argument("--all", help="To {} on all nodes".format(action), action="store_true", dest="all")
+
+ options, args = parser.parse_known_args(args)
+ if options.help:
+ parser.print_help()
+ raise utils.TerminateSubCommand
+ if options is None or args is None:
+ raise utils.TerminateSubCommand
+ if options.all and args:
+ context.fatal_error("Should either use --all or specific node(s)")
+
+ # return local node
+ if not options.all and not args:
+ return [utils.this_node()]
+ member_list = utils.list_cluster_nodes()
+ if not member_list:
+ context.fatal_error("Cannot get the node list from cluster")
+ for node in args:
+ if node not in member_list:
+ context.fatal_error("Node \"{}\" is not a cluster node".format(node))
+
+    node_list = member_list if options.all else args
+    # iterate over a copy, since unreachable nodes are removed from node_list
+    for node in list(node_list):
+        try:
+            utils.ping_node(node)
+        except ValueError as err:
+            logger.warning(str(err))
+            node_list.remove(node)
+    return node_list
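+# Illustrative behaviour (node names are hypothetical):
+#   parse_option_for_nodes(ctx)                  -> [<local node>]
+#   parse_option_for_nodes(ctx, "--all")         -> all reachable cluster members
+#   parse_option_for_nodes(ctx, "alice", "bob")  -> ["alice", "bob"], fatal error
+#                                                   if either is not a member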
+
+
+class NodeMgmt(command.UI):
+ '''
+    Node management class
+ '''
+ name = "node"
+
+ node_standby = "crm_attribute -t nodes -N '%s' -n standby -v '%s' %s"
+ node_maint = "crm_attribute -t nodes -N '%s' -n maintenance -v '%s'"
+ node_delete = """cibadmin -D -o nodes -X '<node uname="%s"/>'"""
+ node_delete_status = """cibadmin -D -o status -X '<node_state uname="%s"/>'"""
+ node_cleanup_resources = "crm_resource --cleanup --node '%s'"
+ node_clear_state = _oneline("""cibadmin %s
+ -o status --xml-text
+ '<node_state id="%s"
+ uname="%s"
+ ha="active"
+ in_ccm="false"
+ crmd="offline"
+ join="member"
+ expected="down"
+ crm-debug-origin="manual_clear"
+ shutdown="0"
+ />'""")
+ node_clear_state_118 = "stonith_admin --confirm %s"
+ hb_delnode = config.path.hb_delnode + " '%s'"
+ crm_node = "crm_node"
+ node_fence = "crm_attribute -t status -N '%s' -n terminate -v true"
+ dc = "crmadmin -D"
+ node_attr = {
+ 'set': "crm_attribute -t nodes -N '%s' -n '%s' -v '%s'",
+ 'delete': "crm_attribute -D -t nodes -N '%s' -n '%s'",
+ 'show': "crm_attribute -G -t nodes -N '%s' -n '%s'",
+ }
+ node_status = {
+ 'set': "crm_attribute -t status -N '%s' -n '%s' -v '%s'",
+ 'delete': "crm_attribute -D -t status -N '%s' -n '%s'",
+ 'show': "crm_attribute -G -t status -N '%s' -n '%s'",
+ }
+ node_utilization = {
+ 'set': "crm_attribute -z -t nodes -N '%s' -n '%s' -v '%s'",
+ 'delete': "crm_attribute -z -D -t nodes -N '%s' -n '%s'",
+ 'show': "crm_attribute -z -G -t nodes -N '%s' -n '%s'",
+ }
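+    # Illustrative expansion of the templates above ("node1" is a hypothetical
+    # node name), e.g. node_attr['set'] % ("node1", "standby", "on") runs:
+    #   crm_attribute -t nodes -N 'node1' -n 'standby' -v 'on'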
+
+ def requires(self):
+ for p in ('cibadmin', 'crm_attribute'):
+ if not utils.is_program(p):
+ logger_utils.no_prog_err(p)
+ return False
+ return True
+
+ @command.alias('list')
+ @command.completers(compl.nodes)
+ def do_show(self, context, node=None):
+ 'usage: show [<node>]'
+ cib = xmlutil.cibdump2elem()
+ if cib is None:
+ return False
+
+ cfg_nodes = cib.xpath('/cib/configuration/nodes/node')
+ node_states = cib.xpath('/cib/status/node_state')
+
+ def find(it, lst):
+ for n in lst:
+ if n.get("uname") == it:
+ return n
+ return None
+
+ def do_print(uname):
+ xml = find(uname, cfg_nodes)
+ state = find(uname, node_states)
+ if xml is not None or state is not None:
+ is_offline = state is not None and state.get("crmd") == "offline"
+ print_node(*unpack_node_xmldata(xml if xml is not None else state, is_offline))
+
+ if node is not None:
+ do_print(node)
+ else:
+ all_nodes = set([n.get("uname") for n in cfg_nodes + node_states])
+ for uname in sorted(all_nodes):
+ do_print(uname)
+ return True
+
+ @command.wait
+ @command.completers(compl.online_nodes)
+ def do_standby(self, context, *args):
+ """
+ usage: standby [<node>] [<lifetime>]
+        To avoid a race condition with the --all option, merge all standby changes into one CIB replace session
+ """
+ # Parse lifetime option
+ lifetime_opt = "forever"
+ lifetime = utils.fetch_lifetime_opt(list(args), iso8601=False)
+ if lifetime:
+ lifetime_opt = lifetime
+ args = args[:-1]
+
+ # Parse node option
+ node_list = parse_option_for_nodes(context, *args)
+ if not node_list:
+ return
+
+ # For default "forever" lifetime, under "nodes" section
+ xml_path = constants.XML_NODE_PATH
+ xml_query_path = constants.XML_NODE_QUERY_STANDBY_PATH
+        xml_query_path_opposite = constants.XML_STATUS_QUERY_STANDBY_PATH
+ # For "reboot" lifetime, under "status" section
+ if lifetime_opt == "reboot":
+ xml_path = constants.XML_STATUS_PATH
+ xml_query_path = constants.XML_STATUS_QUERY_STANDBY_PATH
+            xml_query_path_opposite = constants.XML_NODE_QUERY_STANDBY_PATH
+
+ cib = xmlutil.cibdump2elem()
+ # IMPORTANT:
+ # Do NOT call cibdump2elem twice, or you risk a race where the
+ # resulting diff will contain more changes than the values for
+ # "standby", potentially rolling back the effect of other operations.
+ # Really use the same xml as "original" and basis for the changes.
+ # Thus the "deepcopy" here.
+ #
+ # Possible optimization: instead of deepcopy here and xml_tostring
+ # below and str2tmp in diff_and_patch you probably want to change
+ # diff_and_patch to accept a file (as well), then
+ # from . import tmpfiles
+ # orig_cib_tmpfile = xmlutil.cibdump2tmp()
+ # tmpfiles.add(orig_cib_tmpfile)
+ # cib = xmlutil.file2cib_elem(orig_cib_tmpfile)
+ # ...
+ # diff_and_patch(orig_file=orig_cib_tmpfile, new_str=xmlutil.xml_tostring(cib))
+ orig_cib = copy.deepcopy(cib)
+
+ xml_item_list = cib.xpath(xml_path)
+ for xml_item in xml_item_list:
+ if xml_item.get("uname") in node_list:
+ node_id = xml_item.get('id')
+                # Remove a possible opposite-lifetime standby nvpair
+                item_to_del = cib.xpath(xml_query_path_opposite.format(node_id=node_id))
+ if item_to_del:
+ xmlutil.rmnodes(item_to_del)
+                # If the standby nvpair already exists, update it and continue
+                item = cib.xpath(xml_query_path.format(node_id=node_id))
+                if item:
+                    if item[0].get("value") != "on":
+                        item[0].set("value", "on")
+                    continue
+ # Create standby nvpair
+ interface_item = xml_item
+ if lifetime_opt == "reboot":
+ res_item = xmlutil.get_set_nodes(xml_item, "transient_attributes", create=True)
+ interface_item = res_item[0]
+ res_item = xmlutil.get_set_nodes(interface_item, "instance_attributes", create=True)
+ xmlutil.set_attr(res_item[0], "standby", "on")
+
+ rc = utils.diff_and_patch(xmlutil.xml_tostring(orig_cib), xmlutil.xml_tostring(cib))
+ if not rc:
+ return False
+ for node in node_list:
+ logger.info("standby node %s", node)
+
+ @command.wait
+ @command.completers(compl.standby_nodes)
+ def do_online(self, context, *args):
+ """
+ usage: online [<node>]
+        To avoid a race condition with the --all option, merge all online changes into one CIB replace session
+ """
+ # Parse node option
+ node_list = parse_option_for_nodes(context, *args)
+ if not node_list:
+ return
+
+ cib = xmlutil.cibdump2elem()
+ # IMPORTANT: Do NOT call cibdump2elem twice, or you risk a race.
+ # Really use the same xml as "original" and basis for the changes.
+ # Thus the "deepcopy" here; see also do_standby().
+ orig_cib = copy.deepcopy(cib)
+ for node in node_list:
+ node_id = utils.get_nodeid_from_name(node)
+ for query_path in [constants.XML_NODE_QUERY_STANDBY_PATH, constants.XML_STATUS_QUERY_STANDBY_PATH]:
+ item = cib.xpath(query_path.format(node_id=node_id))
+ if item and item[0].get("value") != "off":
+ item[0].set("value", "off")
+
+ rc = utils.diff_and_patch(xmlutil.xml_tostring(orig_cib), xmlutil.xml_tostring(cib))
+ if not rc:
+ return False
+ for node in node_list:
+ logger.info("online node %s", node)
+
+ @command.wait
+ @command.completers(compl.nodes)
+ def do_maintenance(self, context, node=None):
+ 'usage: maintenance [<node>]'
+ if not node:
+ node = utils.this_node()
+ if not utils.is_name_sane(node):
+ return False
+ return self._commit_node_attr(context, node, "maintenance", "true")
+
+ @command.wait
+ @command.completers(compl.nodes)
+ def do_ready(self, context, node=None):
+ 'usage: ready [<node>]'
+ if not node:
+ node = utils.this_node()
+ if not utils.is_name_sane(node):
+ return False
+ return utils.ext_cmd(self.node_maint % (node, "off")) == 0
+
+ @command.wait
+ @command.completers(compl.nodes)
+ def do_fence(self, context, node):
+ 'usage: fence <node>'
+ if not utils.is_name_sane(node):
+ return False
+ if not config.core.force and \
+ not utils.ask("Fencing %s will shut down the node and migrate any resources that are running on it! Do you want to fence %s?" % (node, node)):
+ return False
+ if xmlutil.is_remote_node(node):
+ return utils.ext_cmd("stonith_admin -F '%s'" % (node)) == 0
+ else:
+ return utils.ext_cmd(self.node_fence % (node)) == 0
+
+ @command.wait
+ @command.completers(compl.nodes)
+ def do_clearstate(self, context, node=None):
+ 'usage: clearstate <node>'
+ if not node:
+ node = utils.this_node()
+ if not utils.is_name_sane(node):
+ return False
+ if not config.core.force and \
+ not utils.ask("Do you really want to drop state for node %s?" % node):
+ return False
+ if utils.is_larger_than_pcmk_118():
+ cib_elem = xmlutil.cibdump2elem()
+ if cib_elem is None:
+ return False
+ if cib_elem.xpath("//node_state[@uname=\"%s\"]/@crmd" % node) == ["online"]:
+ return utils.ext_cmd(self.node_cleanup_resources % node) == 0
+ elif cib_elem.xpath("//node_state[@uname=\"%s\"]/@in_ccm" % node) == ["true"]:
+ logger.warning("Node is offline according to Pacemaker, but online according to corosync. First shut down node '%s'", node)
+ return False
+ else:
+ return utils.ext_cmd(self.node_clear_state_118 % node) == 0
+ else:
+ return utils.ext_cmd(self.node_clear_state % ("-M -c", node, node)) == 0 and \
+ utils.ext_cmd(self.node_clear_state % ("-R", node, node)) == 0
+
+ @classmethod
+ def call_delnode(cls, node):
+ "Remove node (how depends on cluster stack)"
+ rc = True
+ ec, s = ShellUtils().get_stdout("%s -p" % cls.crm_node)
+ if not s:
+ logger.error('%s -p could not list any nodes (rc=%d)', cls.crm_node, ec)
+ rc = False
+ else:
+ partition_l = s.split()
+ if node in partition_l:
+ logger.error("according to %s, node %s is still active", cls.crm_node, node)
+ rc = False
+ cmd = "%s --force -R %s" % (cls.crm_node, node)
+ if not rc:
+ if config.core.force:
+ logger.info('proceeding with node %s removal', node)
+ else:
+ return False
+ ec = utils.ext_cmd(cmd)
+ if ec != 0:
+ node_xpath = "//nodes/node[@uname='{}']".format(node)
+ cmd = 'cibadmin --delete-all --force --xpath "{}"'.format(node_xpath)
+ rc, _, err = ShellUtils().get_stdout_stderr(cmd)
+ if rc != 0:
+ logger.error('"%s" failed, rc=%d, %s', cmd, rc, err)
+ return False
+ if utils.ext_cmd(cls.node_delete % node) != 0 or \
+ utils.ext_cmd(cls.node_delete_status % node) != 0:
+ logger.error("%s removed from membership, but not from CIB!", node)
+ return False
+ return True
+
+ @command.completers(compl.nodes)
+ def do_delete(self, context, node):
+ 'usage: delete <node>'
+ logger.warning('`crm node delete` is deprecated and will very likely be dropped in the near future. It is auto-replaced as `crm cluster remove -c {}`.'.format(node))
+ if config.core.force:
+ rc = subprocess.call(['crm', 'cluster', 'remove', '-F', '-c', node])
+ else:
+ rc = subprocess.call(['crm', 'cluster', 'remove', '-c', node])
+ return rc == 0
+
+ @command.wait
+ @command.completers(compl.nodes, compl.choice(['set', 'delete', 'show']), _find_attr)
+ def do_attribute(self, context, node, cmd, attr, value=None):
+ """usage:
+ attribute <node> set <attr> <value>
+ attribute <node> delete <attr>
+ attribute <node> show <attr>"""
+ return ui_utils.manage_attr(context.get_command_name(), self.node_attr,
+ node, cmd, attr, value)
+
+ @command.wait
+ @command.completers(compl.nodes, compl.choice(['set', 'delete', 'show']), _find_attr)
+ def do_utilization(self, context, node, cmd, attr, value=None):
+ """usage:
+ utilization <node> set <attr> <value>
+ utilization <node> delete <attr>
+ utilization <node> show <attr>"""
+ return ui_utils.manage_attr(context.get_command_name(), self.node_utilization,
+ node, cmd, attr, value)
+
+ @command.wait
+ @command.name('status-attr')
+ @command.completers(compl.nodes, compl.choice(['set', 'delete', 'show']), _find_attr)
+ def do_status_attr(self, context, node, cmd, attr, value=None):
+ """usage:
+ status-attr <node> set <attr> <value>
+ status-attr <node> delete <attr>
+ status-attr <node> show <attr>"""
+ return ui_utils.manage_attr(context.get_command_name(), self.node_status,
+ node, cmd, attr, value)
+
+ def _commit_node_attr(self, context, node_name, attr_name, value):
+ """
+ Perform change to resource
+ """
+ if not utils.is_name_sane(node_name):
+ return False
+ commit = not cib_factory.has_cib_changed()
+ if not commit:
+ context.info("Currently editing the CIB, changes will not be committed")
+ return set_node_attr(node_name, attr_name, value, commit=commit)
+
+ def do_server(self, context, *nodes):
+ """
+ usage:
+ server -- print server hostname / address for each node
+ server <node> ... -- print server hostname / address for node
+ """
+ cib = xmlutil.cibdump2elem()
+ if cib is None:
+ return False
+ for node in cib.xpath('/cib/configuration/nodes/node'):
+ if nodes and node not in nodes:
+ continue
+ name = node.get('uname') or node.get('id')
+ if node.get('type') == 'remote':
+ srv = cib.xpath("//primitive[@id='%s']/instance_attributes/nvpair[@name='server']" % (name))
+ if srv:
+ print(srv[0].get('value'))
+ continue
+ print(name)
+
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/ui_options.py b/crmsh/ui_options.py
new file mode 100644
index 0000000..72b743b
--- /dev/null
+++ b/crmsh/ui_options.py
@@ -0,0 +1,181 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+from . import command
+from . import completers
+from . import config
+from . import options
+
+_yesno = completers.choice(['yes', 'no'])
+
+_legacy_map = {
+ 'editor': ('core', 'editor'),
+ 'pager': ('core', 'pager'),
+ 'user': ('core', 'user'),
+ 'skill_level': ('core', 'skill_level'),
+ 'sort_elements': ('core', 'sort_elements'),
+ 'check_frequency': ('core', 'check_frequency'),
+ 'check_mode': ('core', 'check_mode'),
+ 'wait': ('core', 'wait'),
+ 'add_quotes': ('core', 'add_quotes'),
+ 'manage_children': ('core', 'manage_children'),
+ 'force': ('core', 'force'),
+ 'debug': ('core', 'debug'),
+ 'ptest': ('core', 'ptest'),
+ 'dotty': ('core', 'dotty'),
+ 'dot': ('core', 'dot'),
+ 'output': ('color', 'style'),
+ 'colorscheme': ('color', 'colorscheme'),
+}
+
+
+def _legacy_set_pref(name, value):
+ 'compatibility with old versions'
+ name = name.replace('-', '_')
+ if name == 'colorscheme':
+ return # TODO
+ opt = _legacy_map.get(name)
+ if opt:
+ config.set_option(opt[0], opt[1], value)
+
+
+def _getprefs(opt):
+ 'completer for legacy options'
+ opt = opt.replace('-', '_')
+ if opt == 'colorscheme':
+ return ('black', 'blue', 'green', 'cyan',
+ 'red', 'magenta', 'yellow', 'white', 'normal')
+ opt = _legacy_map.get(opt)
+ if opt:
+ return config.complete(*opt)
+ return []
+
+
+def _set_completer(args):
+ opt = args[-1]
+ opts = opt.split('.')
+ if len(opts) != 2:
+ return []
+ return config.complete(*opts)
+
+
+class CliOptions(command.UI):
+ '''
+ Manage user preferences
+ '''
+ name = "options"
+
+ @command.completers(completers.choice(config.get_all_options()), _set_completer)
+ def do_set(self, context, option, value):
+ '''usage: set <option> <value>'''
+ parts = option.split('.')
+ if len(parts) != 2:
+ context.fatal_error("Unknown option: " + option)
+ config.set_option(parts[0], parts[1], value)
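+        # e.g. `set core.editor vim` -> config.set_option("core", "editor", "vim")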
+
+ @command.name('skill-level')
+ @command.alias('skill_level')
+ @command.completers(_getprefs('skill_level'))
+ def do_skill_level(self, context, level):
+ """usage: skill-level <level>
+ level: operator | administrator | expert"""
+ return _legacy_set_pref('skill-level', level)
+
+ def do_editor(self, context, program):
+ "usage: editor <program>"
+ return _legacy_set_pref('editor', program)
+
+ def do_pager(self, context, program):
+ "usage: pager <program>"
+ return _legacy_set_pref('pager', program)
+
+ def do_user(self, context, crm_user=''):
+ "usage: user [<crm_user>]"
+ return _legacy_set_pref('user', crm_user)
+
+ @command.completers(_getprefs('output'))
+ def do_output(self, context, output_type):
+ "usage: output <type>"
+ _legacy_set_pref("output", output_type)
+ from . import term
+ term.init()
+
+ def do_colorscheme(self, context, colors):
+ "usage: colorscheme <colors>"
+ return _legacy_set_pref("colorscheme", colors)
+
+ @command.name('check-frequency')
+ @command.alias('check_frequency')
+ @command.completers(_getprefs('check_frequency'))
+ def do_check_frequency(self, context, freq):
+ "usage: check-frequency <freq>"
+ return _legacy_set_pref("check-frequency", freq)
+
+ @command.name('check-mode')
+ @command.alias('check_mode')
+ @command.completers(_getprefs('check_mode'))
+ def do_check_mode(self, context, mode):
+ "usage: check-mode <mode>"
+ return _legacy_set_pref("check-mode", mode)
+
+ @command.name('sort-elements')
+ @command.alias('sort_elements')
+ @command.completers(_yesno)
+ def do_sort_elements(self, context, opt):
+ "usage: sort-elements {yes|no}"
+ return _legacy_set_pref("sort-elements", opt)
+
+ @command.completers(_yesno)
+ def do_wait(self, context, opt):
+ "usage: wait {yes|no}"
+ return _legacy_set_pref("wait", opt)
+
+ @command.name('add-quotes')
+ @command.alias('add_quotes')
+ @command.completers(_yesno)
+ def do_add_quotes(self, context, opt):
+ "usage: add-quotes {yes|no}"
+ return _legacy_set_pref("add-quotes", opt)
+
+ @command.name('manage-children')
+ @command.alias('manage_children')
+ @command.completers(_getprefs('manage_children'))
+ def do_manage_children(self, context, opt):
+ "usage: manage-children <option>"
+ return _legacy_set_pref("manage-children", opt)
+
+ @command.alias('list')
+ @command.completers(completers.choice(config.get_all_options()))
+ def do_show(self, context, option=None):
+ "usage: show [all | <option>]"
+ from . import utils
+ opts = config.get_configured_options() if option is None else config.get_all_options()
+
+ def show_options(fn):
+ s = ''
+ for opt in opts:
+ if fn(opt):
+ parts = opt.split('.')
+ val = (opt, config.get_option(parts[0], parts[1], raw=True))
+ s += "%s = %s\n" % val
+ utils.page_string(s)
+
+ if option == 'all' or option is None:
+ show_options(lambda o: True)
+ else:
+ show_options(lambda o: o.startswith(option) or o.endswith(option))
+
+ def do_save(self, context):
+ "usage: save"
+ config.save()
+
+ def do_reset(self, context):
+ "usage: reset"
+ config.reset()
+
+ def end_game(self, no_questions_asked=False):
+ if no_questions_asked and not options.interactive:
+ self.do_save(None)
+
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/ui_ra.py b/crmsh/ui_ra.py
new file mode 100644
index 0000000..b6471bb
--- /dev/null
+++ b/crmsh/ui_ra.py
@@ -0,0 +1,134 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import re
+from . import command
+from . import completers as compl
+from . import utils
+from . import ra
+from . import constants
+from . import options
+
+
+def complete_class_provider_type(args):
+    '''
+    Complete [<class>:[<provider>:]]<type> step by step,
+    one Tab at a time, instead of completing all supported
+    agents at once.
+
+    First completion: agent classes
+    Second completion: providers of ocf or types of other classes
+    Last completion: types of ocf agents
+    '''
+ c = args[-1]
+ classes = ra.ra_classes()
+ if not c:
+ # First completion: agent classes
+ return [l+":" for l in classes]
+
+ ret = set([])
+ providers = ra.ra_providers_all('ocf')
+ if c == 'ocf:' or re.search(r'ocf:[^:]+$', c):
+ # Second completion: providers of ocf
+ for p in providers:
+ if p == '.isolation':
+ continue
+ ret.add('%s:%s:' % ('ocf', p))
+ return list(ret)
+
+ if re.search(r'ocf:.+:', c):
+ # Last completion: types of ocf agents
+ for p in providers:
+ if p == '.isolation':
+ continue
+ types = ra.ra_types('ocf', p)
+ for t in types:
+ ret.add('%s:%s:%s' % ('ocf', p, t))
+ return list(ret)
+
+ if c.endswith(':') and c.strip(':') in classes:
+ # Second completion: types of other classes
+ types = ra.ra_types(c)
+ for t in types:
+ ret.add('%s:%s' % (c.strip(':'), t))
+ return list(ret)
+
+ if re.search(r'.+:.+', c):
+ # Second completion: types of other classes
+ types = ra.ra_types(c.split(':')[0])
+ for t in types:
+ ret.add('%s:%s' % (c.split(':')[0], t))
+ return list(ret)
+
+ # First completion: agent classes
+ return [l+":" for l in classes]
+
+
+class RA(command.UI):
+ '''
+    Resource agent (RA) information class
+ '''
+ name = "ra"
+ provider_classes = ["ocf"]
+
+ def do_classes(self, context):
+ "usage: classes"
+ for c in ra.ra_classes():
+ if c in self.provider_classes:
+ providers = ra.ra_providers_all(c)
+ if providers:
+ print("%s / %s" % (c, ' '.join(providers)))
+ else:
+ print("%s" % c)
+
+ @command.skill_level('administrator')
+ def do_providers(self, context, ra_type, ra_class="ocf"):
+ "usage: providers <ra> [<class>]"
+ print(' '.join(ra.ra_providers(ra_type, ra_class)))
+
+ @command.skill_level('administrator')
+ @command.completers(compl.call(ra.ra_classes), lambda args: ra.ra_providers_all(args[1]))
+ def do_list(self, context, class_, provider_=None):
+ "usage: list <class> [<provider>]"
+ if class_ not in ra.ra_classes():
+ context.fatal_error("class %s does not exist" % class_)
+ if provider_ and provider_ not in ra.ra_providers_all(class_):
+ context.fatal_error("there is no provider %s for class %s" % (provider_, class_))
+ types = ra.ra_types(class_, provider_)
+ if options.regression_tests:
+ for t in types:
+ print(t)
+ else:
+ utils.multicolumn(types)
+
+ @command.skill_level('administrator')
+ @command.alias('meta')
+ @command.completers(complete_class_provider_type)
+ def do_info(self, context, *args):
+ "usage: info [<class>:[<provider>:]]<type>"
+ if len(args) == 0:
+ context.fatal_error("Expected [<class>:[<provider>:]]<type>")
+ elif len(args) > 1: # obsolete syntax
+ if len(args) < 3:
+ ra_type, ra_class, ra_provider = args[0], args[1], "heartbeat"
+ else:
+ ra_type, ra_class, ra_provider = args[0], args[1], args[2]
+    elif args[0] in constants.meta_progs or args[0] in constants.meta_progs_20:
+        ra_class, ra_provider, ra_type = args[0], None, None
+ else:
+ ra_class, ra_provider, ra_type = ra.disambiguate_ra_type(args[0])
+ agent = ra.RAInfo(ra_class, ra_type, ra_provider)
+ if agent.mk_ra_node() is None:
+ return False
+ try:
+ utils.page_string(agent.meta_pretty())
+ except Exception as msg:
+ context.fatal_error(msg)
+
+ @command.skill_level('administrator')
+ def do_validate(self, context, agentname, *params):
+ "usage: validate [<class>:[<provider>:]]<type> [<key>=<value> ...]"
+ rc, _ = ra.validate_agent(agentname, dict([param.split('=', 1) for param in params]), log=True)
+ return rc == 0
diff --git a/crmsh/ui_resource.py b/crmsh/ui_resource.py
new file mode 100644
index 0000000..e7d8429
--- /dev/null
+++ b/crmsh/ui_resource.py
@@ -0,0 +1,796 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013-2018 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+from . import command
+from . import completers as compl
+from . import constants
+from . import config
+from . import utils
+from . import xmlutil
+from . import ui_utils
+from . import options
+from .cibconfig import cib_factory
+from .sh import ShellUtils
+from . import log
+
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+
+
+def rm_meta_attribute(node, attr, l, force_children=False):
+ '''
+    Build a list of nvpair nodes which contain the attribute
+    (recursively in all child resources)
+ '''
+ for c in node.iterchildren():
+ if c.tag == "meta_attributes":
+ nvpair = xmlutil.get_attr_in_set(c, attr)
+ if nvpair is not None:
+ l.append(nvpair)
+ elif force_children or \
+ (xmlutil.is_child_rsc(c) and not c.getparent().tag == "group"):
+ rm_meta_attribute(c, attr, l, force_children=force_children)
+
+def get_children_with_attr(node, attr):
+ l = []
+ for p in node.xpath(".//primitive"):
+ diff_attr = False
+ for meta_set in xmlutil.get_set_nodes(p, "meta_attributes", create=False):
+ p_value = xmlutil.get_attr_value(meta_set, attr)
+ if p_value is not None:
+ diff_attr = True
+ break
+ if diff_attr:
+ l.append(p)
+ return l
+
+def set_deep_meta_attr_node(target_node, attr, value):
+ nvpair_l = []
+ conflicting_attr = ''
+ if 'maintenance' == attr:
+ conflicting_attr = 'is-managed'
+ if 'is-managed' == attr:
+ conflicting_attr = 'maintenance'
+ if xmlutil.is_clone(target_node):
+ for c in target_node.iterchildren():
+ if xmlutil.is_child_rsc(c):
+ rm_meta_attribute(c, attr, nvpair_l)
+ if config.core.manage_children != "never" and \
+ (xmlutil.is_group(target_node) or
+ (xmlutil.is_clone(target_node) and xmlutil.cloned_el(target_node) == "group")):
+        # maintenance and is-managed attributes conflict;
+        # ask the user whether to keep both.
+ if '' != conflicting_attr:
+ odd_children = get_children_with_attr(target_node, conflicting_attr)
+ for c in odd_children:
+ if config.core.manage_children == "always" or \
+ (config.core.manage_children == "ask" and
+ utils.ask("'%s' conflicts with '%s' attribute. Remove '%s' for child resource %s?" %
+ (attr, conflicting_attr, conflicting_attr, c.get("id")))):
+ logger.debug("force remove meta attr %s from %s", conflicting_attr, c.get("id"))
+ rm_meta_attribute(c, conflicting_attr, nvpair_l, force_children=True)
+ # remove the same attributes from all children (if wanted)
+ odd_children = get_children_with_attr(target_node, attr)
+ for c in odd_children:
+ if config.core.manage_children == "always" or \
+ (config.core.manage_children == "ask" and
+ utils.ask("Do you want to override '%s' for child resource %s?" %
+ (attr, c.get("id")))):
+ logger.debug("force remove meta attr %s from %s", attr, c.get("id"))
+ rm_meta_attribute(c, attr, nvpair_l, force_children=True)
+ xmlutil.rmnodes(list(set(nvpair_l)))
+ if '' != conflicting_attr:
+ nvpairs = target_node.xpath("./meta_attributes/nvpair[@name='%s']" % (conflicting_attr))
+ if len(nvpairs) > 0:
+ if (utils.ask("'%s' conflicts with '%s' attribute. Remove '%s' for resource %s?" %
+ (attr, conflicting_attr, conflicting_attr, target_node.get("id")))):
+ xmlutil.rmnodes(list(set(nvpairs)))
+ xmlutil.xml_processnodes(target_node,
+ xmlutil.is_emptynvpairs, xmlutil.rmnodes)
+
+    # work around an interoperability issue with pcs
+    # by finding existing nvpairs -- if there are any, just
+    # set the value in those. Otherwise fall back to adding
+    # to all meta_attributes tags
+ nvpairs = target_node.xpath("./meta_attributes/nvpair[@name='%s']" % (attr))
+ if len(nvpairs) > 0:
+ for nvpair in nvpairs:
+ nvpair.set("value", value)
+ else:
+ for n in xmlutil.get_set_nodes(target_node, "meta_attributes", create=True):
+ xmlutil.set_attr(n, attr, value)
+ return True
+
+
+def set_deep_meta_attr(rsc, attr, value, commit=True):
+ """
+ If the referenced rsc is a primitive that belongs to a group,
+ then set its attribute.
+ Otherwise, go up to the topmost resource which contains this
+ resource and set the attribute there (i.e. if the resource is
+ cloned).
+ If it's a group then check its children. If any of them has
+ the attribute set to a value different from the one given,
+ then ask the user whether to reset them or not (exact
+ behaviour depends on the value of config.core.manage_children).
+ """
+
+ def update_obj(obj):
+ """
+ set the meta attribute in the given object
+ """
+ node = obj.node
+ obj.set_updated()
+ if not (node.tag == "primitive" and
+ node.getparent().tag == "group"):
+ node = xmlutil.get_topmost_rsc(node)
+ return set_deep_meta_attr_node(node, attr, value)
+
+ def flatten(objs):
+ for obj in objs:
+ if isinstance(obj, list):
+ for subobj in obj:
+ yield subobj
+ else:
+ yield obj
+
+ def resolve(obj):
+ if obj.obj_type == 'tag':
+ ret = [cib_factory.find_object(o) for o in obj.node.xpath('./obj_ref/@id')]
+ ret = [r for r in ret if r is not None]
+ return ret
+ return obj
+
+ def is_resource(obj):
+ return xmlutil.is_resource(obj.node)
+
+ objs = cib_factory.find_objects(rsc)
+ if objs is None:
+ logger.error("CIB is not valid!")
+ return False
+ while any(obj for obj in objs if obj.obj_type == 'tag'):
+ objs = list(flatten(resolve(obj) for obj in objs))
+ objs = list(filter(is_resource, objs))
+ logger.debug("set_deep_meta_attr: %s", ', '.join([obj.obj_id for obj in objs]))
+ if not objs:
+ logger.error("Resource not found: %s", rsc)
+ return False
+
+ ok = all(update_obj(obj) for obj in objs)
+ if not ok:
+ logger.error("Failed to update meta attributes for %s", rsc)
+ return False
+
+ if not commit:
+ return True
+
+ ok = cib_factory.commit()
+ if not ok:
+ logger.error("Failed to commit updates to %s", rsc)
+ return False
+ return True
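+# For example (hypothetical ids): set_deep_meta_attr("grp1", "is-managed", "false")
+# on a cloned group sets the attribute on the topmost (clone) element, after
+# offering to drop conflicting "maintenance" nvpairs from its children.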
+
+
+_attrcmds = compl.choice(['delete', 'set', 'show'])
+_raoperations = compl.choice(constants.ra_operations)
+
+
+class RscMgmt(command.UI):
+ '''
+    Resource management class
+ '''
+ name = "resource"
+
+ rsc_status_all = "crm_resource --list"
+ rsc_status = "crm_resource --locate --resource '%s'"
+ rsc_showxml = "crm_resource --query-xml --resource '%s'"
+ rsc_setrole = "crm_resource --meta --resource '%s' --set-parameter target-role --parameter-value '%s'"
+ rsc_migrate = "crm_resource --quiet --move --resource '%s' %s"
+ rsc_unmigrate = "crm_resource --quiet --clear --resource '%s'"
+ rsc_ban = "crm_resource --ban --resource '%s' %s"
+ rsc_maintenance = "crm_resource --resource '%s' --meta --set-parameter maintenance --parameter-value '%s'"
+ rsc_param = {
+ 'set': "crm_resource --resource '%s' --set-parameter '%s' --parameter-value '%s'",
+ 'delete': "crm_resource --resource '%s' --delete-parameter '%s'",
+ 'show': "crm_resource --resource '%s' --get-parameter '%s'",
+ 'get': "crm_resource --resource '%s' --get-parameter '%s'",
+ }
+ rsc_meta = {
+ 'set': "crm_resource --meta --resource '%s' --set-parameter '%s' --parameter-value '%s'",
+ 'delete': "crm_resource --meta --resource '%s' --delete-parameter '%s'",
+ 'show': "crm_resource --meta --resource '%s' --get-parameter '%s'",
+ 'get': "crm_resource --meta --resource '%s' --get-parameter '%s'",
+ }
+ rsc_failcount = {
+ 'set': "crm_attribute -t status -n 'fail-count-%s' -N '%s' -v '%s'",
+ 'set_p': "crm_attribute -t status -P 'fail-count-%s' -N '%s' -v '%s'",
+ 'delete': "crm_failcount -D -r %s -N %s",
+ 'show': "crm_failcount -G -r %s -N %s",
+ 'get': "crm_failcount -G -r %s -N %s",
+ }
+ rsc_utilization = {
+ 'set': "crm_resource --utilization --resource '%s' --set-parameter '%s' --parameter-value '%s'",
+ 'delete': "crm_resource --utilization --resource '%s' --delete-parameter '%s'",
+ 'show': "crm_resource --utilization --resource '%s' --get-parameter '%s'",
+ 'get': "crm_resource --utilization --resource '%s' --get-parameter '%s'",
+ }
+ rsc_secret = {
+ 'set': "cibsecret set '%s' '%s' '%s'",
+ 'stash': "cibsecret stash '%s' '%s'",
+ 'unstash': "cibsecret unstash '%s' '%s'",
+ 'delete': "cibsecret delete '%s' '%s'",
+ 'show': "cibsecret get '%s' '%s'",
+ 'get': "cibsecret get '%s' '%s'",
+ 'check': "cibsecret check '%s' '%s'",
+ }
+
+ def _refresh_cleanup(self, action, rsc, node, force):
+ """
+ Implements the refresh and cleanup commands.
+ """
+ if rsc == "force":
+ rsc, force = None, True
+ if node == "force":
+ node, force = None, True
+ cmd = ["crm_resource", "--" + action]
+ if rsc:
+ if not utils.is_name_sane(rsc):
+ return False
+ cmd.append("--resource")
+ cmd.append(rsc)
+ if node:
+ if not utils.is_name_sane(node):
+ return False
+ cmd.append("--node")
+ cmd.append(node)
+ if force:
+ cmd.append("--force")
+ return utils.ext_cmd(" ".join(cmd)) == 0
+
+ def requires(self):
+ for program in ('crm_resource', 'crm_attribute'):
+ if not utils.is_program(program):
+ logger_utils.no_prog_err(program)
+ return False
+ return True
+
+ @command.alias('show', 'list')
+ @command.completers(compl.resources)
+ def do_status(self, context, *resources):
+ "usage: status [<rsc> ...]"
+ if len(resources) > 0:
+ rc = True
+ for rsc in resources:
+ if not utils.is_name_sane(rsc):
+ return False
+ rc = rc and (utils.ext_cmd(self.rsc_status % rsc) == 0)
+ return rc
+ else:
+ return utils.ext_cmd(self.rsc_status_all) == 0
+
+ def _commit_meta_attr(self, context, rsc, name, value):
+ """
+ Perform change to resource
+ """
+ if not utils.is_name_sane(rsc):
+ return False
+ commit = not cib_factory.has_cib_changed()
+ if not commit:
+ context.info("Currently editing the CIB, changes will not be committed")
+ return set_deep_meta_attr(rsc, name, value, commit=commit)
+
+ def _commit_meta_attrs(self, context, resources, name, value):
+ """
+ Perform change to list of resources
+ """
+ for rsc in resources:
+ if not utils.is_name_sane(rsc):
+ return False
+ commit = not cib_factory.has_cib_changed()
+ if not commit:
+ context.info("Currently editing the CIB, changes will not be committed")
+
+ rc = True
+ for rsc in resources:
+ rc = rc and set_deep_meta_attr(rsc, name, value, commit=False)
+ if commit and rc:
+ ok = cib_factory.commit()
+ if not ok:
+ logger.error("Failed to commit updates to %s", rsc)
+ return ok
+ return rc
+
+ @command.wait
+ @command.completers(compl.resources_stopped)
+ def do_start(self, context, *resources):
+ "usage: start <rsc> [<rsc> ...]"
+ if len(resources) == 0:
+ context.error("Expected at least one resource as argument")
+ return self._commit_meta_attrs(context, resources, "target-role", "Started")
+
+ @command.wait
+ @command.completers(compl.resources_started)
+ def do_stop(self, context, *resources):
+ "usage: stop <rsc> [<rsc> ...]"
+ if len(resources) == 0:
+ context.error("Expected at least one resource as argument")
+ return self._commit_meta_attrs(context, resources, "target-role", "Stopped")
+
+ @command.wait
+ @command.completers(compl.resources)
+ def do_restart(self, context, *resources):
+ "usage: restart <rsc> [<rsc> ...]"
+ logger.info("ordering %s to stop", ", ".join(resources))
+ if not self._commit_meta_attrs(context, resources, "target-role", "Stopped"):
+ return False
+ if not utils.wait4dc("stop", not options.batch):
+ return False
+ logger.info("ordering %s to start", ", ".join(resources))
+ return self._commit_meta_attrs(context, resources, "target-role", "Started")
+
+ @command.wait
+ @command.completers(compl.resources)
+ def do_promote(self, context, rsc):
+ "usage: promote <rsc>"
+ if not utils.is_name_sane(rsc):
+ return False
+ if not xmlutil.RscState().is_ms_or_promotable_clone(rsc):
+ logger.error("%s is not a promotable resource", rsc)
+ return False
+ role = utils.handle_role_for_ocf_1_1(constants.RSC_ROLE_PROMOTED_LEGACY)
+ return utils.ext_cmd(self.rsc_setrole % (rsc, role)) == 0
+
+ def do_scores(self, context):
+ "usage: scores"
+ if utils.is_program('crm_simulate'):
+ utils.ext_cmd('crm_simulate -sUL')
+ elif utils.is_program('ptest'):
+ utils.ext_cmd('ptest -sUL')
+ else:
+ context.fatal_error("Need crm_simulate or ptest in path to display scores")
+
+ @command.completers(compl.resources)
+ def do_locate(self, context, *resources):
+ "usage: locate <rsc> [<rsc> ...]"
+ if len(resources) == 0:
+ context.error("Expected at least one resource as argument")
+ for rsc in resources:
+ utils.ext_cmd("crm_resource --resource '%s' --locate" % (rsc))
+
+ @command.wait
+ @command.completers(compl.resources)
+ def do_demote(self, context, rsc):
+ "usage: demote <rsc>"
+ if not utils.is_name_sane(rsc):
+ return False
+ if not xmlutil.RscState().is_ms_or_promotable_clone(rsc):
+ logger.error("%s is not a promotable resource", rsc)
+ return False
+ role = utils.handle_role_for_ocf_1_1(constants.RSC_ROLE_UNPROMOTED_LEGACY)
+ return utils.ext_cmd(self.rsc_setrole % (rsc, role)) == 0
+
+ @command.completers(compl.resources)
+ def do_manage(self, context, rsc):
+ "usage: manage <rsc>"
+ return self._commit_meta_attr(context, rsc, "is-managed", "true")
+
+ @command.completers(compl.resources)
+ def do_unmanage(self, context, rsc):
+ "usage: unmanage <rsc>"
+ return self._commit_meta_attr(context, rsc, "is-managed", "false")
+
+ def move_or_ban(self, context, rsc, *args):
+ """
+ Common codes for move or ban action
+ """
+ if not utils.is_name_sane(rsc):
+ return False
+ cmd_map_dict = {'move': self.rsc_migrate,
+ 'ban': self.rsc_ban}
+ action = context.get_command_name()
+ action_cap = action.capitalize()
+ action_cmd = cmd_map_dict[action]
+ usage = "usage: {} <rsc> [<node>] [<lifetime>] [force]".format(action)
+ node = None
+ lifetime = None
+
+ argl = list(args)
+ force = "force" in utils.fetch_opts(argl, ["force"]) or config.core.force
+ if len(argl) >= 3:
+ context.fatal_error(usage)
+ if len(argl) == 2: # must be <node> <lifetime>
+ node = argl[0]
+ if not xmlutil.is_our_node(node):
+ context.fatal_error("Not our node: " + node)
+ lifetime = utils.fetch_lifetime_opt(argl)
+ elif len(argl) == 1: # could be <node> or <lifetime>
+ if xmlutil.is_our_node(argl[0]):
+ node = argl[0]
+ else:
+ lifetime = utils.fetch_lifetime_opt(argl)
+
+ if action == "move" and not node and not force:
+ context.fatal_error("No target node: {} requires either a target node or 'force'".format(action_cap))
+
+ opts = ''
+ if node:
+ opts = "--node '%s'" % node
+ if lifetime:
+ opts = "%s --lifetime '%s'" % (opts, lifetime)
+ if force or config.core.force:
+ opts = "%s --force" % opts
+ rc = utils.ext_cmd(action_cmd % (rsc, opts))
+ if rc == 0:
+ if node:
+ logger.info("%s constraint created for %s to %s", action_cap, rsc, node)
+ else:
+ logger.info("%s constraint created for %s", action_cap, rsc)
+ logger.info("Use `crm resource clear %s` to remove this constraint", rsc)
+ return rc == 0
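+        # Illustrative ("r1"/"node2" are hypothetical names; PT1H is an ISO 8601
+        # duration): `resource move r1 node2 PT1H` expands to
+        #   crm_resource --quiet --move --resource 'r1' --node 'node2' --lifetime 'PT1H'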
+
+ @command.alias('migrate')
+ @command.skill_level('administrator')
+ @command.wait
+ @command.completers_repeating(compl.resources, compl.nodes)
+ def do_move(self, context, rsc, *args):
+ """usage: move <rsc> [<node>] [<lifetime>] [force]"""
+ return self.move_or_ban(context, rsc, *args)
+
+ @command.skill_level('administrator')
+ @command.wait
+ @command.completers_repeating(compl.resources, compl.nodes)
+ def do_ban(self, context, rsc, *args):
+ """usage: ban <rsc> [<node>] [<lifetime>] [force]"""
+ return self.move_or_ban(context, rsc, *args)
+
+ @command.alias('unmove', 'unban', 'unmigrate')
+ @command.skill_level('administrator')
+ @command.wait
+ @command.completers(compl.resources)
+ def do_clear(self, context, rsc):
+ "usage: clear <rsc>"
+ if not utils.is_name_sane(rsc):
+ return False
+ rc = utils.ext_cmd(self.rsc_unmigrate % rsc)
+ if rc == 0:
+ logger.info("Removed migration constraints for %s", rsc)
+ return rc == 0
+
+ @command.skill_level('administrator')
+ @command.wait
+ @command.completers(compl.resources, compl.nodes)
+ def do_cleanup(self, context, rsc=None, node=None, force=False):
+ "usage: cleanup [<rsc>] [<node>] [force]"
+ return self._refresh_cleanup("cleanup", rsc, node, force)
+
+ @command.wait
+ @command.completers(compl.resources, compl.nodes)
+ def do_operations(self, context, resource=None, node=None):
+ "usage: operations [<rsc>] [<node>]"
+ cmd = "crm_resource -O"
+ if resource is None:
+ return utils.ext_cmd(cmd)
+ if node is None:
+ return utils.ext_cmd("%s -r '%s'" % (cmd, resource))
+ return utils.ext_cmd("%s -r '%s' -N '%s'" % (cmd, resource, node))
+
+ @command.wait
+ @command.completers(compl.resources)
+ def do_constraints(self, context, resource):
+ "usage: constraints <rsc>"
+ return utils.ext_cmd("crm_resource -a -r '%s'" % (resource))
+
+ @command.wait
+ @command.completers(compl.resources, _attrcmds, compl.nodes)
+ def do_failcount(self, context, rsc, cmd, node, value=None, operation=None, interval=None):
+ """usage:
+ failcount <rsc> set <node> <value> [operation] [interval]
+ failcount <rsc> delete <node>
+ failcount <rsc> show <node>"""
+ def sec_to_ms(s):
+ return s + '000'
+
+ def ms_to_sec(m):
+ return m[:len(m)-3]
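+        # e.g. sec_to_ms("10") == "10000"; ms_to_sec("10000") == "10"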
+
+ if rsc not in compl.resources():
+ context.warning("Resource {} not exists in this cluster".format(rsc))
+ return
+ valid_cmd_list = ["set", "delete", "show"]
+ if cmd not in valid_cmd_list:
+ context.fatal_error("{} is not valid command(should be one of {})".format(cmd, valid_cmd_list))
+ nodeid = utils.get_nodeid_from_name(node)
+ if nodeid is None:
+ context.fatal_error("Node {} not in this cluster".format(node))
+
+ if cmd == "set":
+ # query the current failcount status
+ query_cmd = "cibadmin -Ql --xpath '/cib/status/node_state[@id='{}']'".format(nodeid)
+ rc, out, err = ShellUtils().get_stdout_stderr(query_cmd)
+ if rc != 0:
+ context.fatal_error(err)
+
+ # try to get failcount dict {operation:interval}
+ import re
+ failcount_res = re.findall(r'fail-count-{}#(.*)_([0-9]+)'.format(rsc), out)
+ if not failcount_res:
+ return True if value and int(value) == 0 else False
+ failcount_dict = dict(failcount_res)
+
+ # validate for operation and interval
+            if operation and operation not in failcount_dict.keys():
+                context.fatal_error("Usage: failcount <rsc> set <node> <value> [operation] [interval]\n"
+                                    "operation must be one of \"{}\"".format(' '.join(failcount_dict.keys())))
+            if (operation and interval) and (operation, sec_to_ms(interval)) not in failcount_res:
+                context.fatal_error("Usage: failcount <rsc> set <node> <value> [operation] [interval]\n"
+                                    "(operation, interval) must be one of {}".format(
+                                        [(op, ms_to_sec(inter)) for op, inter in failcount_res]))
+
+ # just one failcount entry
+ if len(failcount_res) == 1:
+ operation = failcount_res[0][0]
+ interval = failcount_dict[operation]
+ rsc = '{}#{}_{}'.format(rsc, operation, interval)
+
+ # multiple failcount entries for this resource and node
+ if len(failcount_res) > 1:
+ if operation and interval:
+ rsc = '{}#{}_{}'.format(rsc, operation, sec_to_ms(interval))
+ elif int(value) == 0:
+ # using '-P' option of 'crm_attribute' command
+ cmd = "set_p"
+ if operation:
+ op_interval_str = '|'.join(['{}_{}'.format(operation, inter) for op, inter in failcount_res if op==operation])
+ else:
+ op_interval_str = '|'.join(['{}_{}'.format(op, inter) for op, inter in failcount_res])
+ rsc = '{}#({})'.format(rsc, op_interval_str)
+ else:
+ # value != 0
+ if operation and len([op for op, _ in failcount_res if op == operation]) == 1:
+ rsc = '{}#{}_{}'.format(rsc, operation, failcount_dict[operation])
+ else:
+ context.fatal_error("Should specify (operation, interval) between {}".
+ format([(op, ms_to_sec(inter)) for op, inter in failcount_res]))
+
+ return ui_utils.manage_attr(context.get_command_name(), self.rsc_failcount,
+ rsc, cmd, node, value)
+
+ @command.skill_level('administrator')
+ @command.wait
+ @command.completers(compl.resources, _attrcmds)
+ def do_param(self, context, rsc, cmd, param, value=None):
+ """usage:
+ param <rsc> set <param> <value>
+ param <rsc> delete <param>
+ param <rsc> show <param>"""
+ return ui_utils.manage_attr(context.get_command_name(), self.rsc_param,
+ rsc, cmd, param, value)
+
+ @command.skill_level('administrator')
+ @command.completers(compl.resources,
+ compl.choice(['set', 'stash', 'unstash', 'delete', 'show', 'check']))
+ def do_secret(self, context, rsc, cmd, param, value=None):
+ """usage:
+ secret <rsc> set <param> <value>
+ secret <rsc> stash <param>
+ secret <rsc> unstash <param>
+ secret <rsc> delete <param>
+ secret <rsc> show <param>
+ secret <rsc> check <param>"""
+ return ui_utils.manage_attr(context.get_command_name(), self.rsc_secret,
+ rsc, cmd, param, value)
+
+ @command.skill_level('administrator')
+ @command.wait
+ @command.completers(compl.resources, _attrcmds)
+ def do_meta(self, context, rsc, cmd, attr, value=None):
+ """usage:
+ meta <rsc> set <attr> <value>
+ meta <rsc> delete <attr>
+ meta <rsc> show <attr>"""
+ return ui_utils.manage_attr(context.get_command_name(), self.rsc_meta,
+ rsc, cmd, attr, value)
+
+ @command.skill_level('administrator')
+ @command.wait
+ @command.completers(compl.resources, _attrcmds)
+ def do_utilization(self, context, rsc, cmd, attr, value=None):
+ """usage:
+ utilization <rsc> set <attr> <value>
+ utilization <rsc> delete <attr>
+ utilization <rsc> show <attr>"""
+ return ui_utils.manage_attr(context.get_command_name(), self.rsc_utilization,
+ rsc, cmd, attr, value)
+
+ @command.alias('reprobe')
+ @command.completers(compl.resources, compl.nodes)
+ def do_refresh(self, context, rsc=None, node=None, force=False):
+ 'usage: refresh [<rsc>] [<node>] [force]'
+ return self._refresh_cleanup("refresh", rsc, node, force)
+
+ @command.wait
+ @command.completers(compl.resources, compl.choice(['on', 'off', 'true', 'false']))
+ def do_maintenance(self, context, resource, on_off='true'):
+ 'usage: maintenance <resource> [on|off|true|false]'
+ on_off = on_off.lower()
+ if on_off not in ('on', 'true', 'off', 'false'):
+ context.fatal_error("Expected <resource> [on|off|true|false]")
+ elif on_off in ('on', 'true'):
+ on_off = 'true'
+ else:
+ on_off = 'false'
+ return self._commit_meta_attr(context, resource, "maintenance", on_off)
+
+ def _get_trace_rsc(self, rsc_id):
+ if not cib_factory.refresh():
+ return None
+ rsc = cib_factory.find_object(rsc_id)
+ if not rsc:
+ logger.error("resource %s does not exist", rsc_id)
+ return None
+ if rsc.obj_type != "primitive":
+ logger.error("element %s is not a primitive resource", rsc_id)
+ return None
+ return rsc
+
+ def _add_trace_op(self, rsc, op, interval, dir):
+ from lxml import etree
+ n = etree.Element('op')
+ n.set('name', op)
+ n.set('interval', interval)
+ n.set(constants.trace_ra_attr, '1')
+ if dir is not None:
+ n.set(constants.trace_dir_attr, dir)
+ return rsc.add_operation(n)
+
+ def _trace_resource(self, context, rsc_id, rsc, dir):
+ """Enable RA tracing for a specified resource."""
+ op_nodes = rsc.node.xpath('operations/op')
+
+ def trace(name):
+ for o in op_nodes:
+ if o.get('name') == name:
+ return
+ if not self._add_trace_op(rsc, name, '0', dir):
+ context.fatal_error("Failed to add trace for %s:%s" % (rsc_id, name))
+ trace('start')
+ trace('stop')
+ if xmlutil.is_ms_or_promotable_clone(rsc.node):
+ trace('promote')
+ trace('demote')
+ for op_node in op_nodes:
+ op_node = rsc.set_op_attr(op_node, constants.trace_ra_attr, "1")
+ if dir is not None:
+ rsc.set_op_attr(op_node, constants.trace_dir_attr, dir)
+
+ def _trace_op(self, context, rsc_id, rsc, op, dir):
+ """Enable RA tracing for a specified operation."""
+ op_nodes = rsc.node.xpath('operations/op[@name="%s"]' % (op))
+ if not op_nodes:
+ if op == 'monitor':
+ context.fatal_error("No monitor operation configured for %s" % (rsc_id))
+ if not self._add_trace_op(rsc, op, '0', dir):
+ context.fatal_error("Failed to add trace for %s:%s" % (rsc_id, op))
+ for op_node in op_nodes:
+ op_node = rsc.set_op_attr(op_node, constants.trace_ra_attr, "1")
+ if dir is not None:
+ rsc.set_op_attr(op_node, constants.trace_dir_attr, dir)
+
+ def _trace_op_interval(self, context, rsc_id, rsc, op, interval, dir):
+ """Enable RA tracing for an operation with the exact interval."""
+ op_node = xmlutil.find_operation(rsc.node, op, interval)
+ if op_node is None and utils.crm_msec(interval) != 0:
+ context.fatal_error("Operation %s with interval %s not found in %s" % (op, interval, rsc_id))
+ if op_node is None:
+ if not self._add_trace_op(rsc, op, interval, dir):
+ context.fatal_error("Failed to add trace for %s:%s" % (rsc_id, op))
+ else:
+ op_node = rsc.set_op_attr(op_node, constants.trace_ra_attr, "1")
+ if dir is not None:
+ rsc.set_op_attr(op_node, constants.trace_dir_attr, dir)
+
+ @command.completers(compl.primitives, _raoperations)
+ def do_trace(self, context, rsc_id, *args):
+ 'usage: trace <rsc> [<op>] [<interval>] [<log-dir>]'
+        usage = 'usage: trace <rsc> [<op>] [<interval>] [<log-dir>]'
+ rsc = self._get_trace_rsc(rsc_id)
+ if not rsc:
+ return False
+
+ argl = list(args)
+ force = "force" in utils.fetch_opts(argl, ["force"]) or config.core.force
+ if len(argl) > 3:
+ context.fatal_error(usage)
+        op = None
+        interval = None
+        dir = None
+ for arg in argl:
+ if arg[0] == '/':
+ if dir is not None:
+ context.fatal_error(usage)
+ dir = arg
+ elif arg.isnumeric():
+ if interval is not None:
+ context.fatal_error(usage)
+ interval = arg
+ else:
+ if op is not None:
+ context.fatal_error(usage)
+ op = arg
+
+ if op == "probe":
+ op = "monitor"
+ if interval is None:
+ interval = "0"
+ if op is None:
+ self._trace_resource(context, rsc_id, rsc, dir)
+ elif interval is None:
+ self._trace_op(context, rsc_id, rsc, op, dir)
+ else:
+ self._trace_op_interval(context, rsc_id, rsc, op, interval, dir)
+ if not cib_factory.commit():
+ return False
+ rsc_type = rsc.node.get("type")
+ trace_dir = "{}/{}".format(dir, rsc_type) if dir else "{}/trace_ra/{}".format(config.path.heartbeat_dir, rsc_type)
+ logger.info("Trace for %s%s is written to %s", rsc_id, ":"+op if op else "", trace_dir)
+ if op is not None and op != "monitor":
+ logger.info("Trace set, restart %s to trace the %s operation", rsc_id, op)
+ else:
+ logger.info("Trace set, restart %s to trace non-monitor operations", rsc_id)
+ return True
+
+ def _remove_trace(self, rsc, op_node):
+ logger.debug("op_node: %s", xmlutil.xml_tostring(op_node))
+ op_node = rsc.del_op_attr(op_node, constants.trace_ra_attr)
+ op_node = rsc.del_op_attr(op_node, constants.trace_dir_attr)
+ if rsc.is_dummy_operation(op_node):
+ rsc.del_operation(op_node)
+
+ def _untrace_resource(self, context, rsc_id, rsc):
+ """Disable RA tracing for a specified resource."""
+ op_nodes = rsc.node.xpath(
+ 'operations/op[instance_attributes/nvpair[@name="%s"]]' %
+ (constants.trace_ra_attr))
+ for op_node in op_nodes:
+ self._remove_trace(rsc, op_node)
+
+ def _untrace_op(self, context, rsc_id, rsc, op):
+ """Disable RA tracing for a specified operation."""
+ op_nodes = rsc.node.xpath('operations/op[@name="%s"]' % (op))
+ if not op_nodes:
+ context.fatal_error("Operation %s not found in %s" % (op, rsc_id))
+ for op_node in op_nodes:
+ self._remove_trace(rsc, op_node)
+
+ def _untrace_op_interval(self, context, rsc_id, rsc, op, interval):
+ """Disable RA tracing for an operation with the exact interval."""
+ op_node = xmlutil.find_operation(rsc.node, op, interval)
+ if op_node is None:
+ context.fatal_error(
+ "Operation %s with interval %s not found in %s" %
+ (op, interval, rsc_id))
+ self._remove_trace(rsc, op_node)
+
+ @command.completers(compl.primitives, _raoperations)
+ def do_untrace(self, context, rsc_id, op=None, interval=None):
+ 'usage: untrace <rsc> [<op>] [<interval>]'
+ rsc = self._get_trace_rsc(rsc_id)
+ if not rsc:
+ return False
+ if op == "probe":
+ op = "monitor"
+ if interval is None:
+ interval = "0"
+ if op is None:
+ self._untrace_resource(context, rsc_id, rsc)
+ elif interval is None:
+ self._untrace_op(context, rsc_id, rsc, op)
+ else:
+ self._untrace_op_interval(context, rsc_id, rsc, op, interval)
+ if not cib_factory.commit():
+ return False
+ logger.info("Stop tracing %s%s", rsc_id, " for operation "+op if op else "")
+ return True
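
Taken together, trace and untrace work by toggling per-operation attributes on the primitive's op elements. A minimal sketch of the element _add_trace_op builds, assuming constants.trace_ra_attr and constants.trace_dir_attr resolve to the conventional names "trace_ra" and "trace_dir":

    from lxml import etree

    op = etree.Element('op')
    op.set('name', 'monitor')
    op.set('interval', '10')
    op.set('trace_ra', '1')             # assumed value of constants.trace_ra_attr
    op.set('trace_dir', '/tmp/traces')  # assumed value of constants.trace_dir_attr
    print(etree.tostring(op).decode())
    # <op name="monitor" interval="10" trace_ra="1" trace_dir="/tmp/traces"/>

When no log directory is given, the final log message in do_trace falls back to <heartbeat_dir>/trace_ra/<agent-type>, as computed above.
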
diff --git a/crmsh/ui_root.py b/crmsh/ui_root.py
new file mode 100644
index 0000000..12d0f2e
--- /dev/null
+++ b/crmsh/ui_root.py
@@ -0,0 +1,204 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+# Revised UI structure for crmsh
+#
+# Goals:
+#
+# - Modularity
+# - Reduced global state
+# - Separate static hierarchy from current context
+# - Fix completion
+# - Implement bash completion
+# - Retain all previous functionality
+# - Have per-level pre-requirements:
+# def requires(self): <- raise error if prereqs are not met
+# This is so that crmsh can be installed with minimal prereqs,
+# and use cluster sublevel to install all requirements
+
+from . import command
+from . import completers as compl
+from . import cmd_status
+from . import ui_cib
+from . import ui_cibstatus
+from . import ui_cluster
+from . import ui_configure
+from . import ui_corosync
+from . import ui_history
+from . import ui_maintenance
+from . import ui_node
+from . import ui_options
+from . import ui_ra
+from . import ui_resource
+from . import ui_script
+from . import ui_site
+
+
+class Root(command.UI):
+ """
+ Root of the UI hierarchy.
+ """
+
+ # name is the user-visible name of this CLI level.
+ name = 'root'
+
+ @command.level(ui_cib.CibShadow)
+ @command.help('''manage shadow CIBs
+A shadow CIB is a regular cluster configuration which is kept in
+a file. The CRM and the CRM tools may manage a shadow CIB in the
+same way as the live CIB (i.e. the current cluster configuration).
+A shadow CIB may be applied to the cluster in one step.
+''')
+ def do_cib(self):
+ pass
+
+ @command.level(ui_cibstatus.CibStatusUI)
+ @command.help('''CIB status management and editing
+Enter this level to edit and manage the CIB status section.
+''')
+ def do_cibstatus(self):
+ pass
+
+ @command.level(ui_cluster.Cluster)
+ @command.help('''Cluster setup and management
+Commands at this level enable low-level cluster configuration
+management with HA awareness.
+''')
+ def do_cluster(self):
+ pass
+
+ @command.level(ui_configure.CibConfig)
+ @command.help('''CRM cluster configuration
+The configuration level.
+
+Note that you can change the working CIB at the cib level. It is
+advisable to configure shadow CIBs and then commit them to the
+cluster.
+''')
+ def do_configure(self):
+ pass
+
+ @command.level(ui_corosync.Corosync)
+ @command.help('''Corosync configuration management
+Corosync is the underlying messaging layer for most HA clusters.
+This level provides commands for editing and managing the corosync
+configuration.
+''')
+ def do_corosync(self):
+ pass
+
+ @command.level(ui_history.History)
+ @command.help('''CRM cluster history
+The history level.
+
+Examine Pacemaker's history: node and resource events, logs.
+''')
+ def do_history(self):
+ pass
+
+ @command.level(ui_maintenance.Maintenance)
+ @command.help('''maintenance
+Commands that should only be executed while in
+maintenance mode.
+''')
+ def do_maintenance(self):
+ pass
+
+ @command.level(ui_node.NodeMgmt)
+ @command.help('''node management
+A few node-related tasks, such as placing a node in standby, are implemented
+here.
+''')
+ def do_node(self):
+ pass
+
+ @command.level(ui_options.CliOptions)
+ @command.help('''user preferences
+Several user preferences are available. Note that it is possible
+to save the preferences to a startup file.
+''')
+ def do_options(self):
+ pass
+
+ @command.level(ui_ra.RA)
+ @command.help('''resource agents information center
+This level contains commands which show various information about
+the installed resource agents. It is available both at the top
+level and at the `configure` level.
+''')
+ def do_ra(self):
+ pass
+
+ @command.help('''Utility to collect logs and other information
+`report` is a utility to collect all information (logs,
+configuration files, system information, etc) relevant to
+crmsh over the given period of time.
+''')
+ def do_report(self, context, *args):
+ import sys
+ from crmsh.report import core
+ sys.argv[1:] = args
+ core.run()
+
+ @command.level(ui_resource.RscMgmt)
+ @command.help('''resource management
+Everything related to resources management is available at this
+level. Most commands are implemented using the crm_resource(8)
+program.
+''')
+ def do_resource(self):
+ pass
+
+ @command.level(ui_script.Script)
+ @command.help('''Cluster scripts
+Cluster scripts can perform cluster-wide configuration,
+validation and management. See the `list` command for
+an overview of available scripts.
+''')
+ def do_script(self):
+ pass
+
+ @command.level(ui_site.Site)
+ @command.help('''Geo-cluster support
+The site level.
+
+Geo-cluster related management.
+''')
+ def do_site(self):
+ pass
+
+ @command.completers(compl.choice(compl.status_option))
+ @command.help('''show cluster status
+Show cluster status. The status is displayed by `crm_mon`. Supply
+additional arguments for more information or different format.
+See `crm_mon(8)` for more details.
+
+Usage:
+...............
+status [<option> ...]
+
+option :: bynode | inactive | ops | timing | failcounts
+...............
+''')
+ def do_status(self, context, *args):
+ return cmd_status.cmd_status(args)
+
+ @command.help('''Verify cluster state
+Performs basic checks for the cluster configuration and
+current status, reporting potential issues.
+
+Usage:
+.................
+verify [scores]
+.................
+''')
+ def do_verify(self, context, *args):
+ return cmd_status.cmd_verify(args)
+
+
+# this will initialize _children for all levels under the root
+Root.init_ui()
+
+
+# vim:ts=4:sw=4:et:
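
Every sublevel above follows the same pattern: a do_<name> stub carrying a command.level decorator, wired up once by Root.init_ui(). A hedged sketch of attaching a hypothetical sublevel the same way:

    from crmsh import command

    class Demo(command.UI):
        name = 'demo'                 # hypothetical sublevel

    class MiniRoot(command.UI):
        name = 'root'

        @command.level(Demo)
        @command.help('enter the demo level')
        def do_demo(self):
            pass

    MiniRoot.init_ui()                # populates _children for dispatch and completion
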
diff --git a/crmsh/ui_script.py b/crmsh/ui_script.py
new file mode 100644
index 0000000..abf6f0b
--- /dev/null
+++ b/crmsh/ui_script.py
@@ -0,0 +1,523 @@
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+
+import sys
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+from . import config
+from . import command
+from . import scripts
+from . import utils
+from . import options
+from . import completers as compl
+from . import log
+
+
+logger = log.setup_logger(__name__)
+
+
+class ConsolePrinter(object):
+ def __init__(self):
+ self.in_progress = False
+
+ def print_header(self, script, params, hosts):
+ if script['shortdesc']:
+ logger.info(script['shortdesc'])
+ logger.info("Nodes: " + ', '.join([x[0] for x in hosts]))
+
+ def error(self, host, message):
+ logger.error("[%s]: %s", host, message)
+
+ def output(self, host, rc, out, err):
+ if out:
+ logger.info("[%s]: %s", host, out)
+ if err or rc != 0:
+ logger.error("[%s]: (rc=%d) %s", host, rc, err)
+
+ def start(self, action):
+ if not options.batch:
+ txt = '%s...' % (action['shortdesc'] or action['name'])
+ sys.stdout.write(txt)
+ sys.stdout.flush()
+ self.in_progress = True
+
+ def finish(self, action, rc, output):
+ self.flush()
+ if rc:
+ logger.info(action['shortdesc'] or action['name'])
+ else:
+ logger.error("%s (rc=%s)", action['shortdesc'] or action['name'], rc)
+ if output:
+ print(output)
+
+ def flush(self):
+ if self.in_progress:
+ self.in_progress = False
+ if not config.core.debug:
+ sys.stdout.write('\r')
+ else:
+ sys.stdout.write('\n')
+ sys.stdout.flush()
+
+ def debug(self, msg):
+ if config.core.debug or options.regression_tests:
+ self.flush()
+ logger.debug(msg)
+
+ def print_command(self, nodes, command):
+ self.flush()
+ sys.stdout.write("** %s - %s\n" % (nodes, command))
+
+
+class JsonPrinter(object):
+ def __init__(self):
+ self.results = []
+
+ def print_header(self, script, params, hosts):
+ pass
+
+ def error(self, host, message):
+ self.results.append({'host': str(host), 'error': str(message) if message else ''})
+
+ def output(self, host, rc, out, err):
+ ret = {'host': host, 'rc': rc, 'output': str(out)}
+ if err:
+ ret['error'] = str(err)
+ self.results.append(ret)
+
+ def start(self, action):
+ pass
+
+ def finish(self, action, rc, output):
+ ret = {'rc': rc, 'shortdesc': str(action['shortdesc'])}
+ if not rc:
+ ret['error'] = str(output) if output else ''
+ else:
+ ret['output'] = str(output) if output else ''
+ print(json.dumps(ret, sort_keys=True))
+
+ def flush(self):
+ pass
+
+ def debug(self, msg):
+ if config.core.debug:
+ logger.debug(msg)
+
+ def print_command(self, nodes, command):
+ pass
+
+
+def describe_param(p, name, getall):
+ if not getall and p.get('advanced'):
+ return ""
+ opt = ' (required) ' if p['required'] else ''
+ opt += ' (unique) ' if p['unique'] else ''
+ if 'value' in p:
+ opt += (' (default: %s)' % (repr(p['value']))) if p['value'] else ''
+ s = " %s%s\n" % (name, opt)
+ s += " %s\n" % (p['shortdesc'])
+ return s
+
+
+def _scoped_name(context, name):
+ if context:
+ return ':'.join(context) + ':' + name
+ return name
+
+
+def describe_step(icontext, context, s, getall):
+ ret = "%s. %s" % ('.'.join([str(i + 1) for i in icontext]), scripts.format_desc(s['shortdesc']) or 'Parameters')
+ if not s['required']:
+ ret += ' (optional)'
+ ret += '\n\n'
+ if s.get('name'):
+ context = context + [s['name']]
+ for p in s.get('parameters', []):
+ ret += describe_param(p, _scoped_name(context, p['name']), getall)
+ for i, step in enumerate(s.get('steps', [])):
+ ret += describe_step(icontext + [i], context, step, getall)
+ return ret
+
+
+def _nvpairs2parameters(args):
+ """
+ input: list with name=value nvpairs, where each name is a :-path
+ output: dict tree of name:value, where value can be a nested dict tree
+ """
+ def _set(d, path, val):
+ if len(path) == 1:
+ d[path[0]] = val
+ else:
+ if path[0] not in d:
+ d[path[0]] = {}
+ _set(d[path[0]], path[1:], val)
+
+ ret = {}
+ for key, val in utils.nvpairs2dict(args).items():
+ _set(ret, key.split(':'), val)
+ return ret
+
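
A worked example of the mapping, with hypothetical parameter names:

    _nvpairs2parameters(['id=www', 'virtual-ip:ip=192.168.0.5'])
    # -> {'id': 'www', 'virtual-ip': {'ip': '192.168.0.5'}}
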
+
+_fixups = {
+ 'wizard': 'Legacy Wizards',
+ 'sap': 'SAP',
+ 'nfs': 'NFS'
+}
+
+
+def _category_pretty(c):
+ v = _fixups.get(str(c).lower())
+ if v is not None:
+ return v
+ return str(c).capitalize()
+
+
+class Script(command.UI):
+ '''
+ Cluster scripts can perform cluster-wide configuration,
+ validation and management. See the `list` command for
+ an overview of available scripts.
+
+ The script UI is a thin veneer over the scripts
+ backend module.
+ '''
+ name = "script"
+
+ @command.completers_repeating(compl.choice(['all', 'names']))
+ def do_list(self, context, *args):
+ '''
+ List available scripts.
+ Hides scripts with category 'Script' or '' by default,
+ unless "all" is passed as an argument.
+ '''
+ for arg in args:
+ if arg.lower() not in ("all", "names"):
+ context.fatal_error("Unexpected argument '%s': expected [all|names]" % (arg))
+ show_all = any(x.lower() == 'all' for x in args)
+ names = any(x.lower() == 'names' for x in args)
+ if not names:
+ categories = {}
+ for name in scripts.list_scripts():
+ try:
+ script = scripts.load_script(name)
+ if script is None:
+ continue
+ cat = script['category'].lower()
+ if not show_all and cat == 'script':
+ continue
+ cat = _category_pretty(cat)
+ if cat not in categories:
+ categories[cat] = []
+ categories[cat].append("%-16s %s" % (script['name'], script['shortdesc']))
+ except ValueError as err:
+ logger.error(str(err))
+ continue
+ for c, lst in sorted(iter(categories.items()), key=lambda x: x[0]):
+ if c:
+ print("%s:\n" % (c))
+ for s in sorted(lst):
+ print(s)
+ print('')
+ elif show_all:
+ for name in scripts.list_scripts():
+ print(name)
+ else:
+ for name in scripts.list_scripts():
+ try:
+ script = scripts.load_script(name)
+ if script is None or script['category'].lower() == 'script':
+ continue
+ except ValueError as err:
+ logger.error(str(err))
+ continue
+ print(name)
+
+ @command.completers_repeating(compl.call(scripts.list_scripts))
+ @command.alias('info', 'describe')
+ def do_show(self, context, name, show_all=None):
+ '''
+ Describe the given script.
+ '''
+ script = scripts.load_script(name)
+ if script is None:
+ return False
+
+ show_all = show_all == 'all'
+
+ vals = {
+ 'name': script['name'],
+ 'category': _category_pretty(script['category']),
+ 'shortdesc': str(script['shortdesc']),
+ 'longdesc': scripts.format_desc(script['longdesc']),
+ 'steps': "\n".join((describe_step([i], [], s, show_all) for i, s in enumerate(script['steps'])))}
+ output = """%(name)s (%(category)s)
+%(shortdesc)s
+
+%(longdesc)s
+
+%(steps)s
+""" % vals
+ if show_all:
+ output += "Common Parameters\n\n"
+ for name, defval, desc in scripts.common_params():
+ output += " %s\n" % (name)
+ output += " %s\n" % (desc)
+ if defval is not None:
+ output += " (default: %s)\n" % (defval)
+ utils.page_string(output)
+
+ @command.completers(compl.call(scripts.list_scripts))
+ def do_verify(self, context, name, *args):
+ '''
+ Verify the script parameters
+ '''
+ script = scripts.load_script(name)
+ if script is None:
+ return False
+ ret = scripts.verify(script, _nvpairs2parameters(args))
+ if ret is None:
+ return False
+ if not ret:
+ print("OK (no actions)")
+ for i, action in enumerate(ret):
+ shortdesc = action.get('shortdesc', '')
+ text = str(action.get('text', ''))
+ longdesc = str(action.get('longdesc', ''))
+ print("%s. %s\n" % (i + 1, shortdesc))
+ if longdesc:
+ for line in str(longdesc).split('\n'):
+ print("\t%s" % (line))
+ print('')
+ if text:
+ for line in str(text).split('\n'):
+ print("\t%s" % (line))
+ print('')
+
+ @command.completers(compl.call(scripts.list_scripts))
+ def do_run(self, context, name, *args):
+ '''
+ Run the given script.
+ '''
+ script = scripts.load_script(name)
+ if script is not None:
+ return scripts.run(script, _nvpairs2parameters(args), ConsolePrinter())
+ return False
+
+ @command.name('_print')
+ @command.skill_level('administrator')
+ @command.completers(compl.call(scripts.list_scripts))
+ def do_print(self, context, name):
+ '''
+ Debug print the given script.
+ '''
+ script = scripts.load_script(name)
+ if script is None:
+ return False
+ import pprint
+ pprint.pprint(script)
+
+ @command.name('_actions')
+ @command.skill_level('administrator')
+ @command.completers(compl.call(scripts.list_scripts))
+ def do_actions(self, context, name, *args):
+ '''
+ Debug print the actions for the given script.
+ '''
+ script = scripts.load_script(name)
+ if script is None:
+ return False
+ ret = scripts.verify(script, _nvpairs2parameters(args))
+ if ret is None:
+ return False
+ import pprint
+ pprint.pprint(ret)
+
+ @command.name('_convert')
+ def do_convert(self, context, workflow, outdir=".", category="basic"):
+ """
+ Convert Hawk wizards to cluster scripts.
+ Needs more work to be really useful.
+ workflow: hawk workflow script
+ outdir: where the cluster script will be written
+ category: category set in the new wizard
+ """
+ import yaml
+ import os
+ from .ordereddict import OrderedDict
+
+ def flatten(script):
+ if not isinstance(script, dict):
+ return script
+
+ for k, v in script.items():
+ if isinstance(v, scripts.Text):
+ script[k] = str(v)
+ elif isinstance(v, dict):
+ script[k] = flatten(v)
+ elif isinstance(v, tuple) or isinstance(v, list):
+ script[k] = [flatten(vv) for vv in v]
+ elif isinstance(v, str):
+ script[k] = v.strip()
+
+ return script
+
+ def order_rep(dumper, data):
+ return dumper.represent_mapping(u'tag:yaml.org,2002:map', list(data.items()), flow_style=False)
+
+ def scriptsorter(item):
+ order = ["version", "name", "category", "shortdesc", "longdesc", "include", "parameters", "steps", "actions"]
+ return order.index(item[0])
+
+ yaml.add_representer(OrderedDict, order_rep)
+ fromscript = os.path.abspath(workflow)
+ tgtdir = outdir
+
+ scripts.build_script_cache()
+ name = os.path.splitext(os.path.basename(fromscript))[0]
+ script = scripts.load_script_file(name, fromscript)
+ script = flatten(script)
+ script["category"] = category
+ del script["name"]
+ del script["dir"]
+ script["actions"] = [{"cib": "\n\n".join([action["cib"] for action in script["actions"]])}]
+
+ script = OrderedDict(sorted(list(script.items()), key=scriptsorter))
+ if script is not None:
+ try:
+ os.mkdir(os.path.join(tgtdir, name))
+ except OSError:
+ pass
+ tgtfile = os.path.join(tgtdir, name, "main.yml")
+ with open(tgtfile, 'w') as tf:
+ try:
+ print("%s -> %s" % (fromscript, tgtfile))
+ yaml.dump([script], tf, explicit_start=True, default_flow_style=False)
+ except Exception as err:
+ print(err)
+
+ def _json_list(self, context, cmd):
+ """
+ ["list"]
+ """
+ for name in scripts.list_scripts():
+ try:
+ script = scripts.load_script(name)
+ if script is not None:
+ print(json.dumps({'name': name,
+ 'category': script['category'].lower(),
+ 'shortdesc': script['shortdesc'],
+ 'longdesc': scripts.format_desc(script['longdesc'])}, sort_keys=True))
+ except ValueError as err:
+ print(json.dumps({'name': name,
+ 'error': str(err)}, sort_keys=True))
+ return True
+
+ def _json_show(self, context, cmd):
+ """
+ ["show", <name>]
+ """
+ if len(cmd) < 2:
+ print(json.dumps({'error': 'Incorrect number of arguments: %s (expected %s)' % (len(cmd), 2)}))
+ return False
+ name = cmd[1]
+ script = scripts.load_script(name)
+ if script is None:
+ return False
+ print(json.dumps({'name': script['name'],
+ 'category': script['category'].lower(),
+ 'shortdesc': script['shortdesc'],
+ 'longdesc': scripts.format_desc(script['longdesc']),
+ 'steps': scripts.clean_steps(script['steps'])}, sort_keys=True))
+ return True
+
+ def _json_verify(self, context, cmd):
+ """
+ ["verify", <name>, <params>]
+ """
+ if len(cmd) < 3:
+ print(json.dumps({'error': 'Incorrect number of arguments: %s (expected %s)' % (len(cmd), 3)}))
+ return False
+ name = cmd[1]
+ params = cmd[2]
+ script = scripts.load_script(name)
+ if script is None:
+ return False
+ actions = scripts.verify(script, params)
+ if actions is None:
+ return False
+ else:
+ for action in actions:
+ obj = {'name': str(action.get('name', '')),
+ 'shortdesc': str(action.get('shortdesc', '')),
+ 'longdesc': str(action.get('longdesc', '')),
+ 'text': str(action.get('text', '')),
+ 'nodes': str(action.get('nodes', ''))}
+ if 'sudo' in action:
+ obj['sudo'] = action['sudo']
+ print(json.dumps(obj, sort_keys=True))
+ return True
+
+ def _json_run(self, context, cmd):
+ """
+ ["run", <name>, <params>]
+ """
+ if len(cmd) < 3:
+ print(json.dumps({'error': 'Incorrect number of arguments: %s (expected %s)' % (len(cmd), 3)}))
+ return False
+ name = cmd[1]
+ params = cmd[2]
+ script = scripts.load_script(name)
+ if script is None:
+ return False
+ printer = JsonPrinter()
+ ret = scripts.run(script, params, printer)
+ if not ret and printer.results:
+ for result in printer.results:
+ if 'error' in result:
+ print(json.dumps(result, sort_keys=True))
+ return ret
+
+ def do_json(self, context, command):
+ """
+ JSON API for the scripts, for use in web frontends.
+ Line-based output: enter a JSON command,
+ get lines of output back. In the description below, the output is
+ described as an array, but really it is returned line-by-line.
+
+ API:
+
+ ["list"]
+ => [{name, shortdesc, category}]
+ ["show", <name>]
+ => [{name, shortdesc, longdesc, category, <<steps>>}]
+ <<steps>> := [{name, shortdesc, longdesc, required, <<parameters>>, <<steps>>}]
+ <<parameters>> := [{name, shortdesc, longdesc, required, unique, type, advanced, value, example}]
+ ["verify", <name>, <values>]
+ => [{shortdesc, longdesc, nodes}]
+ ["run", <name>, <values>]
+ => [{shortdesc, rc, output|error}]
+ """
+ cmd = json.loads(command)
+ if len(cmd) < 1:
+ print(json.dumps({'error': 'Failed to decode valid JSON command'}))
+ return False
+ try:
+ if cmd[0] == "list":
+ return self._json_list(context, cmd)
+ elif cmd[0] == "show":
+ return self._json_show(context, cmd)
+ elif cmd[0] == "verify":
+ return self._json_verify(context, cmd)
+ elif cmd[0] == "run":
+ return self._json_run(context, cmd)
+ else:
+ print(json.dumps({'error': "Unknown command: %s" % (cmd[0])}))
+ return False
+ except ValueError as err:
+ print(json.dumps({'error': str(err)}))
+ return False
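
A hedged sketch of driving this line-based protocol from Python, assuming crm script json accepts the JSON command as its single argument (as do_json suggests); the script name 'health' is hypothetical:

    import json
    import subprocess

    proc = subprocess.run(['crm', 'script', 'json', json.dumps(['show', 'health'])],
                          capture_output=True, text=True)
    for line in proc.stdout.splitlines():
        print(json.loads(line))       # one JSON document per line of output
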
diff --git a/crmsh/ui_site.py b/crmsh/ui_site.py
new file mode 100644
index 0000000..4377328
--- /dev/null
+++ b/crmsh/ui_site.py
@@ -0,0 +1,81 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import time
+from . import command
+from . import completers as compl
+from . import config
+from . import utils
+from . import log
+
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+_ticket_commands = {
+ 'grant': "%s -t '%s' -g",
+ 'revoke': "%s -t '%s' -r",
+ 'delete': "%s -t '%s' -D granted",
+ 'standby': "%s -t '%s' -s",
+ 'activate': "%s -t '%s' -a",
+ 'show': "%s -t '%s' -G granted",
+ 'time': "%s -t '%s' -G last-granted",
+}
+
+
+def _show(context, ticket, val):
+ "Display status of ticket"
+ if val == "false":
+ print("ticket %s is revoked" % ticket)
+ elif val == "true":
+ print("ticket %s is granted" % ticket)
+ else:
+ context.fatal_error("unexpected value for ticket %s: %s" % (ticket, val))
+
+
+def _time(context, ticket, val):
+ "Display grant time for ticket"
+ if not utils.is_int(val):
+ context.fatal_error("unexpected value for ticket %s: %s" % (ticket, val))
+ if val == "-1":
+ context.fatal_error("%s: no such ticket" % ticket)
+ print("ticket %s last time granted on %s" % (ticket, time.ctime(int(val))))
+
+
+class Site(command.UI):
+ '''
+ The site level: geo-cluster ticket management via crm_ticket.
+ '''
+ name = "site"
+
+ def requires(self):
+ if not utils.is_program('crm_ticket'):
+ logger_utils.no_prog_err('crm_ticket')
+ return False
+ return True
+
+ @command.skill_level('administrator')
+ @command.completers(compl.choice(list(_ticket_commands.keys())))
+ def do_ticket(self, context, subcmd, ticket):
+ "usage: ticket {grant|revoke|standby|activate|show|time|delete} <ticket>"
+
+ base_cmd = "crm_ticket"
+ if config.core.force:
+ base_cmd += " --force"
+
+ attr_cmd = _ticket_commands.get(subcmd)
+ if not attr_cmd:
+ context.fatal_error('Expected one of %s' % '|'.join(list(_ticket_commands.keys())))
+ if not utils.is_name_sane(ticket):
+ return False
+ if subcmd not in ("show", "time"):
+ return utils.ext_cmd(attr_cmd % (base_cmd, ticket)) == 0
+ rc, l = utils.stdout2list(attr_cmd % (base_cmd, ticket))
+ try:
+ val = l[0]
+ except IndexError:
+ context.fatal_error("apparently nothing to show for ticket %s" % ticket)
+ if subcmd == "show":
+ _show(context, ticket, val)
+ else: # time
+ _time(context, ticket, val)
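
Each subcommand expands its _ticket_commands template against the base command; for a hypothetical ticket 'ticketA' this yields, for example:

    grant  ->  crm_ticket -t 'ticketA' -g
    show   ->  crm_ticket -t 'ticketA' -G granted
    time   ->  crm_ticket -t 'ticketA' -G last-granted

with --force added to the base command when core.force is set.
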
diff --git a/crmsh/ui_template.py b/crmsh/ui_template.py
new file mode 100644
index 0000000..7e03105
--- /dev/null
+++ b/crmsh/ui_template.py
@@ -0,0 +1,360 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import os
+import re
+import shlex
+from . import command
+from . import completers as compl
+from . import utils
+from . import config
+from . import userdir
+from . import options
+from .template import LoadTemplate
+from .cliformat import cli_format
+from .cibconfig import mkset_obj, cib_factory
+from . import log
+
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+
+
+def check_transition(inp, state, possible_l):
+ if state not in possible_l:
+ logger.error("input (%s) in wrong state %s", inp, state)
+ return False
+ return True
+
+
+def _unique_config_name(tmpl):
+ n = 0
+ while n < 99:
+ c = "%s-%d" % (tmpl, n)
+ if not os.path.isfile("%s/%s" % (userdir.CRMCONF_DIR, c)):
+ return c
+ n += 1
+ raise ValueError("Failed to generate unique configuration name")
+
+
+class Template(command.UI):
+ '''
+ Configuration templates.
+ '''
+ name = "template"
+
+ def __init__(self):
+ command.UI.__init__(self)
+ self.curr_conf = ''
+ self.init_dir()
+
+ @command.skill_level('administrator')
+ @command.completers_repeating(compl.null, compl.call(utils.listtemplates))
+ def do_new(self, context, name, *args):
+ "usage: new [<config>] <template> [<template> ...] [params name=value ...]"
+ if not utils.is_filename_sane(name):
+ return False
+ if os.path.isfile("%s/%s" % (userdir.CRMCONF_DIR, name)):
+ logger.error("config %s exists; delete it first", name)
+ return False
+ lt = LoadTemplate(name)
+ rc = True
+ mode = 0
+ params = {"id": name}
+ loaded_template = False
+ for s in args:
+ if mode == 0 and s == "params":
+ mode = 1
+ elif mode == 1:
+ a = s.split('=')
+ if len(a) != 2:
+ logger_utils.syntax_err(args, context='new')
+ rc = False
+ else:
+ params[a[0]] = a[1]
+ elif lt.load_template(s):
+ loaded_template = True
+ else:
+ rc = False
+ if not loaded_template:
+ if name not in utils.listtemplates():
+ logger.error("Expected template argument")
+ return False
+ tmpl = name
+ name = _unique_config_name(tmpl)
+ lt = LoadTemplate(name)
+ lt.load_template(tmpl)
+ if rc:
+ lt.post_process(params)
+ if not rc or not lt.write_config(name):
+ return False
+ self.curr_conf = name
+
+ @command.skill_level('administrator')
+ @command.completers(compl.call(utils.listconfigs))
+ def do_delete(self, context, name, force=''):
+ "usage: delete <config> [force]"
+ if force:
+ if force != "force" and force != "--force":
+ logger_utils.syntax_err((context.get_command_name(), force), context='delete')
+ return False
+ if not self.config_exists(name):
+ return False
+ if name == self.curr_conf:
+ if not force and not config.core.force and \
+ not utils.ask("Do you really want to remove config %s which is in use?" %
+ self.curr_conf):
+ return False
+ else:
+ self.curr_conf = ''
+ os.remove("%s/%s" % (userdir.CRMCONF_DIR, name))
+
+ @command.skill_level('administrator')
+ @command.completers(compl.call(utils.listconfigs))
+ def do_load(self, context, name=''):
+ "usage: load [<config>]"
+ if not name:
+ self.curr_conf = ''
+ return True
+ if not self.config_exists(name):
+ return False
+ self.curr_conf = name
+
+ @command.skill_level('administrator')
+ @command.completers(compl.call(utils.listconfigs))
+ def do_edit(self, context, name=''):
+ "usage: edit [<config>]"
+ if not name and not self.curr_conf:
+ logger.error("please load a config first")
+ return False
+ if name:
+ if not self.config_exists(name):
+ return False
+ utils.edit_file("%s/%s" % (userdir.CRMCONF_DIR, name))
+ else:
+ utils.edit_file("%s/%s" % (userdir.CRMCONF_DIR, self.curr_conf))
+
+ @command.completers(compl.call(utils.listconfigs))
+ def do_show(self, context, name=''):
+ "usage: show [<config>]"
+ if not name and not self.curr_conf:
+ logger.error("please load a config first")
+ return False
+ if name:
+ if not self.config_exists(name):
+ return False
+ print(self.process(name))
+ else:
+ print(self.process())
+
+ @command.skill_level('administrator')
+ @command.completers(compl.join(compl.call(utils.listconfigs),
+ compl.choice(['replace', 'update'])),
+ compl.call(utils.listconfigs))
+ def do_apply(self, context, *args):
+ "usage: apply [<method>] [<config>]"
+ method = "replace"
+ name = ''
+ if len(args) > 0:
+ i = 0
+ if args[0] in ("replace", "update"):
+ method = args[0]
+ i += 1
+ if len(args) > i:
+ name = args[i]
+ if not name and not self.curr_conf:
+ logger.error("please load a config first")
+ return False
+ if name:
+ if not self.config_exists(name):
+ return False
+ s = self.process(name)
+ else:
+ s = self.process()
+ if not s:
+ return False
+ tmp = utils.str2tmp(s)
+ if not tmp:
+ return False
+ if method == "replace":
+ if options.interactive and cib_factory.has_cib_changed():
+ if not utils.ask("This operation will erase all changes. Do you want to proceed?"):
+ return False
+ cib_factory.erase()
+ set_obj = mkset_obj()
+ rc = set_obj.import_file(method, tmp)
+ try:
+ os.unlink(tmp)
+ except OSError:
+ pass
+ return rc
+
+ @command.completers(compl.choice(['configs', 'templates']))
+ def do_list(self, context, templates=''):
+ "usage: list [configs|templates]"
+ if templates == "templates":
+ utils.multicolumn(utils.listtemplates())
+ elif templates == "configs":
+ utils.multicolumn(utils.listconfigs())
+ else:
+ print("Templates:")
+ utils.multicolumn(utils.listtemplates())
+ print("\nConfigurations:")
+ utils.multicolumn(utils.listconfigs())
+
+ def init_dir(self):
+ '''Create the conf directory, link to templates'''
+ if not os.path.isdir(userdir.CRMCONF_DIR):
+ try:
+ os.makedirs(userdir.CRMCONF_DIR)
+ except os.error as msg:
+ logger.error("makedirs: %s", msg)
+
+ def get_depends(self, tmpl):
+ '''return a list of required templates'''
+ # Not used. May need it later.
+ try:
+ templatepath = os.path.join(config.path.sharedir, 'templates', tmpl)
+ tf = open(templatepath, "r")
+ except IOError as msg:
+ logger.error("open: %s", msg)
+ return
+ l = []
+ for s in tf:
+ a = s.split()
+ if len(a) >= 2 and a[0] == '%depends_on':
+ l += a[1:]
+ tf.close()
+ return l
+
+ def config_exists(self, name):
+ if not utils.is_filename_sane(name):
+ return False
+ if not os.path.isfile("%s/%s" % (userdir.CRMCONF_DIR, name)):
+ logger.error("%s: no such config", name)
+ return False
+ return True
+
+ def replace_params(self, s, user_data):
+ change = False
+ for i, word in enumerate(s):
+ for p in user_data:
+ # is parameter in the word?
+ pos = word.find('%' + p)
+ if pos < 0:
+ continue
+ endpos = pos + len('%' + p)
+ # and it isn't part of another word?
+ if re.match("[A-Za-z0-9]", word[endpos:endpos+1]):
+ continue
+ # if the value contains a space or
+ # it is a value of an attribute
+ # put quotes around it
+ if user_data[p].find(' ') >= 0 or word[pos-1:pos] == '=':
+ v = '"' + user_data[p] + '"'
+ else:
+ v = user_data[p]
+ word = word.replace('%' + p, v)
+ change = True # we did replace something
+ if change:
+ s[i] = word
+ if 'opt' in s:
+ if not change:
+ s = []
+ else:
+ s.remove('opt')
+ return s
+
+ def generate(self, l, user_data):
+ '''replace parameters (user_data) and generate output
+ '''
+ l2 = []
+ for piece in l:
+ piece2 = []
+ for s in piece:
+ s = self.replace_params(s, user_data)
+ if s:
+ piece2.append(' '.join(s))
+ if piece2:
+ l2.append(cli_format(piece2, break_lines=True))
+ return '\n'.join(l2)
+
+ def process(self, config=''):
+ '''Create a cli configuration from the current config'''
+ try:
+ f = open("%s/%s" % (userdir.CRMCONF_DIR, config or self.curr_conf), 'r')
+ except IOError as msg:
+ logger.error("open: %s", msg)
+ return ''
+ l = []
+ piece = []
+ user_data = {}
+ # states
+ START = 0
+ PFX = 1
+ DATA = 2
+ GENERATE = 3
+ state = START
+ with logger_utils.line_number():
+ rc = True
+ for inp in f:
+ inp = utils.to_ascii(inp)
+ logger_utils.incr_lineno()
+ if inp.startswith('#'):
+ continue
+ inp = inp.strip()
+ try:
+ s = shlex.split(inp)
+ except ValueError as msg:
+ logger.error(msg)
+ continue
+ while '\n' in s:
+ s.remove('\n')
+ if not s:
+ if state == GENERATE and piece:
+ l.append(piece)
+ piece = []
+ elif s[0] in ("%name", "%depends_on", "%suggests"):
+ continue
+ elif s[0] == "%pfx":
+ if check_transition(inp, state, (START, DATA)) and len(s) == 2:
+ pfx = s[1]
+ state = PFX
+ elif s[0] == "%required":
+ if check_transition(inp, state, (PFX,)):
+ state = DATA
+ data_reqd = True
+ elif s[0] == "%optional":
+ if check_transition(inp, state, (PFX, DATA)):
+ state = DATA
+ data_reqd = False
+ elif s[0] == "%%":
+ if state != DATA:
+ logger.warning("user data in wrong state %s", state)
+ if len(s) < 2:
+ logger.warning("parameter name missing")
+ elif len(s) == 2:
+ if data_reqd:
+ logger.error("required parameter %s not set", s[1])
+ rc = False
+ elif len(s) == 3:
+ user_data["%s:%s" % (pfx, s[1])] = s[2]
+ else:
+ logger.error("%s: syntax error", inp)
+ elif s[0] == "%generate" and check_transition(inp, state, (DATA,)):
+ state = GENERATE
+ piece = []
+ elif state == GENERATE and s:
+ piece.append(s)
+ else:
+ logger.error("<%s> unexpected", inp)
+ if piece:
+ l.append(piece)
+ f.close()
+ if not rc:
+ return ''
+ return self.generate(l, user_data)
+
+
+# vim:ts=4:sw=4:et:
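
The process state machine above accepts configurations of roughly the following shape; this is a hedged sketch with hypothetical identifiers, showing the %pfx/%required/%optional/%%/%generate directives the parser recognizes:

    %name minimal-ip
    %pfx ip
    %required
    %% id vip_1
    %optional
    %% netmask 24
    %generate
    primitive %ip:id ocf:heartbeat:IPaddr2 params ip=%ip:id

replace_params would then rewrite %ip:id to vip_1 in the %generate section, quoting values that contain spaces or follow an '='.
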
diff --git a/crmsh/ui_utils.py b/crmsh/ui_utils.py
new file mode 100644
index 0000000..33f7c86
--- /dev/null
+++ b/crmsh/ui_utils.py
@@ -0,0 +1,164 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import re
+import inspect
+from . import utils
+from . import log
+
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+
+
+def _get_attr_cmd(attr_ext_commands, subcmd):
+ try:
+ attr_cmd = attr_ext_commands[subcmd]
+ if attr_cmd:
+ return attr_cmd
+ except KeyError as msg:
+ raise ValueError(msg)
+ raise ValueError("Bad attr_cmd " + repr(attr_ext_commands))
+
+
+def _dispatch_attr_cmd(cmd, attr_cmd, rsc, subcmd, attr, value):
+ def sanity_check(arg):
+ if not utils.is_name_sane(arg):
+ raise ValueError("Expected valid name, got '%s'" % (arg))
+ if subcmd in ['set', 'set_p']:
+ if value is None:
+ raise ValueError("Missing value argument to set")
+ sanity_check(rsc)
+ sanity_check(attr)
+ sanity_check(value)
+ return utils.ext_cmd(attr_cmd % (rsc, attr, value)) == 0
+ elif subcmd in ('delete', 'show') or \
+ (cmd == "secret" and subcmd in ('stash', 'unstash', 'check')):
+ if value is not None:
+ raise ValueError("Too many arguments to %s" % (subcmd))
+ sanity_check(rsc)
+ sanity_check(attr)
+ return utils.ext_cmd(attr_cmd % (rsc, attr)) == 0
+ raise ValueError("Unknown command " + repr(subcmd))
+
+
+def manage_attr(cmd, attr_ext_commands, rsc, subcmd, attr, value):
+ '''
+ TODO: describe.
+ '''
+ try:
+ attr_cmd = _get_attr_cmd(attr_ext_commands, subcmd)
+ if re.search(r"\w+=\w+", attr):
+ attr, value = attr.split('=', 1)
+ return _dispatch_attr_cmd(cmd, attr_cmd, rsc, subcmd, attr, value)
+ except ValueError as msg:
+ cmdline = [rsc, subcmd, attr]
+ if value is not None:
+ cmdline.append(value)
+ logger_utils.bad_usage(cmd, ' '.join(cmdline), msg)
+ return False
+
+
+def ptestlike(simfun, def_verb, cmd, args):
+ verbosity = def_verb # default verbosity
+ nograph = False
+ scores = False
+ utilization = False
+ actions = False
+ for p in args:
+ if p == "nograph":
+ nograph = True
+ elif p == "scores":
+ scores = True
+ elif p == "utilization":
+ utilization = True
+ elif p == "actions":
+ actions = True
+ elif re.match("^vv*$", p):
+ verbosity = p
+ else:
+ logger_utils.bad_usage(cmd, ' '.join(args))
+ return False
+ return simfun(nograph, scores, utilization, actions, verbosity)
+
+
+def graph_args(args):
+ '''
+ Common parameters for two graph commands:
+ configure graph [<gtype> [<file> [<img_format>]]]
+ history graph <pe> [<gtype> [<file> [<img_format>]]]
+ '''
+ def tryarg(n, orelse):
+ try:
+ return args[n]
+ except IndexError:
+ return orelse
+ except TypeError:
+ return orelse
+
+ from .crm_gv import gv_types
+ gtype, outf, ftype = None, None, None
+ gtype = tryarg(0, "dot")
+ if gtype not in gv_types:
+ logger.error("graph type %s is not supported", gtype)
+ return False, gtype, outf, ftype
+ outf = tryarg(1, None)
+ if outf is not None and not utils.is_path_sane(outf):
+ return False, gtype, outf, ftype
+ ftype = tryarg(2, gtype)
+ return True, gtype, outf, ftype
+
+
+def pretty_arguments(f, nskip=0):
+ '''
+ Returns a prettified representation
+ of the command arguments
+ '''
+ specs = inspect.getfullargspec(f)
+ named_args = []
+ if specs.defaults is None:
+ named_args += specs.args
+ else:
+ named_args += specs.args[:-len(specs.defaults)]
+ named_args += [("[%s]" % a) for a in specs.args[-len(specs.defaults):]]
+ if specs.varargs:
+ named_args += ['[%s ...]' % (specs.varargs)]
+ if nskip:
+ named_args = named_args[nskip:]
+ return ' '.join(named_args)
+
+
+def validate_arguments(f, args, nskip=0):
+ '''
+ Compares the declared arguments of f to
+ the given arguments in args, and raises
+ ValueError if the arguments don't match.
+
+ nskip: When reporting an error, skip this
+ many initial arguments when counting.
+ For example, pass 1 to not count self on a
+ method.
+
+ Note: Does not support keyword arguments.
+ '''
+ specs = inspect.getfullargspec(f)
+ min_args = len(specs.args)
+ if specs.defaults is not None:
+ min_args -= len(specs.defaults)
+ max_args = len(specs.args)
+ if specs.varargs:
+ max_args = -1
+
+ def mknamed():
+ return pretty_arguments(f, nskip=nskip)
+
+ if min_args == max_args and len(args) != min_args:
+ raise ValueError("Expected (%s), takes exactly %d arguments (%d given)" %
+ (mknamed(), min_args-nskip, len(args)-nskip))
+ elif len(args) < min_args:
+ raise ValueError("Expected (%s), takes at least %d arguments (%d given)" %
+ (mknamed(), min_args-nskip, len(args)-nskip))
+ if max_args >= 0 and len(args) > max_args:
+ raise ValueError("Expected (%s), takes at most %d arguments (%d given)" %
+ (mknamed(), max_args-nskip, len(args)-nskip))
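
A small illustration of the two helpers on a hypothetical command method:

    def sample(self, context, name, value=None, *rest):
        pass

    pretty_arguments(sample, nskip=2)
    # -> 'name [value] [rest ...]'
    validate_arguments(sample, ('self', 'ctx', 'vip'), nskip=2)   # passes
    validate_arguments(sample, ('self', 'ctx'), nskip=2)          # raises ValueError
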
diff --git a/crmsh/upgradeutil.py b/crmsh/upgradeutil.py
new file mode 100644
index 0000000..0c560d8
--- /dev/null
+++ b/crmsh/upgradeutil.py
@@ -0,0 +1,194 @@
+import logging
+import os.path
+import typing
+
+import sys
+
+import crmsh.healthcheck
+import crmsh.parallax
+import crmsh.utils
+from crmsh import user_of_host
+from crmsh.prun import prun
+
+
+# bump this seq when the upgrade check needs to be run
+CURRENT_UPGRADE_SEQ = (1, 0)
+DATA_DIR = '/var/lib/crmsh'
+SEQ_FILE_PATH = DATA_DIR + '/upgrade_seq'
+# touch this file to force an upgrade process
+FORCE_UPGRADE_FILE_PATH = DATA_DIR + '/upgrade_forced'
+
+TIMEOUT_SSH_GET_SEQUENCE = 3
+
+
+VERSION_FEATURES = {
+ (1, 0): [crmsh.healthcheck.PasswordlessHaclusterAuthenticationFeature]
+}
+
+
+logger = logging.getLogger(__name__)
+
+
+class _SkipUpgrade(Exception):
+ pass
+
+
+def _parse_upgrade_seq(s: bytes) -> typing.Tuple[int, int]:
+ parts = s.split(b'.', 1)
+ if len(parts) != 2:
+ raise ValueError('Invalid upgrade seq {}'.format(s))
+ major = int(parts[0])
+ minor = int(parts[1])
+ return major, minor
+
+
+def _format_upgrade_seq(s: typing.Tuple[int, int]) -> str:
+ return '.'.join((str(x) for x in s))
+
+
+def _get_file_content(path, default=None):
+ try:
+ with open(path, 'rb') as f:
+ return f.read()
+ except FileNotFoundError:
+ return default
+
+
+def _parallax_run(nodes: typing.Sequence[str], cmd: str) -> typing.Dict[str, typing.Tuple[int, bytes, bytes]]:
+ results = prun.prun({node: cmd for node in nodes})
+ for node, result in results.items():
+ if isinstance(result, prun.SSHError):
+ raise ValueError("Failed on {}: {}".format(node, result))
+ return {node: (result.returncode, result.stdout, result.stderr) for node, result in results.items()}
+
+
+def _is_upgrade_needed(nodes):
+ """decide whether upgrading is needed by checking local sequence file"""
+ needed = False
+ try:
+ os.stat(FORCE_UPGRADE_FILE_PATH)
+ needed = True
+ except FileNotFoundError:
+ pass
+ if not needed:
+ s = _get_file_content(SEQ_FILE_PATH, b'').strip()
+ if s == b'':
+ # try the old path
+ seq_file_path = os.path.expanduser('~hacluster/crmsh') + '/upgrade_seq'
+ s = _get_file_content(seq_file_path, b'').strip()
+ if s != b'':
+ try:
+ os.mkdir(DATA_DIR)
+ except FileExistsError:
+ pass
+ with open(SEQ_FILE_PATH, 'wb') as f:
+ f.write(s)
+ f.write(b'\n')
+ try:
+ local_seq = _parse_upgrade_seq(s)
+ except ValueError:
+ local_seq = (0, 0)
+ needed = CURRENT_UPGRADE_SEQ > local_seq
+ return needed
+
+
+def _is_cluster_target_seq_consistent(nodes):
+ cmd = '/usr/bin/env python3 -m crmsh.upgradeutil get-seq'
+ try:
+ results = prun.prun({node: cmd for node in nodes}, timeout_seconds=TIMEOUT_SSH_GET_SEQUENCE)
+ for node, result in results.items():
+ if isinstance(result, prun.PRunError):
+ logger.debug("upgradeutil: get-seq failed: %s", result)
+ raise _SkipUpgrade() from None
+ except (prun.PRunError, user_of_host.UserNotFoundError) as e:
+ logger.debug("upgradeutil: get-seq failed: %s", e)
+ raise _SkipUpgrade() from None
+ try:
+ return all(
+ CURRENT_UPGRADE_SEQ == _parse_upgrade_seq(result.stdout.strip()) if result.returncode == 0 else False
+ for result in results.values()
+ )
+ except ValueError as e:
+ logger.warning("Remote command '%s' returns unexpected output: %s", cmd, results, exc_info=e)
+ return False
+
+
+def _get_minimal_seq_in_cluster(nodes) -> typing.Tuple[int, int]:
+ try:
+ return min(
+ _parse_upgrade_seq(stdout.strip()) if rc == 0 else (0, 0)
+ for rc, stdout, stderr in _parallax_run(nodes, 'cat {}'.format(SEQ_FILE_PATH)).values()
+ )
+ except ValueError:
+ return 0, 0
+
+
+def _upgrade(nodes, seq):
+ def ask(msg: str):
+ pass
+ try:
+ for key in VERSION_FEATURES.keys():
+ if seq < key <= CURRENT_UPGRADE_SEQ:
+ for feature_class in VERSION_FEATURES[key]:
+ feature = feature_class()
+ if crmsh.healthcheck.feature_full_check(feature, nodes):
+ logger.debug("upgradeutil: feature '%s' is already functional.", str(feature))
+ else:
+ logger.debug("upgradeutil: fixing feature '%s'...", str(feature))
+ crmsh.healthcheck.feature_fix(feature, nodes, ask)
+ logger.debug("upgradeutil: configuration fix succeeded.")
+ except crmsh.healthcheck.AskDeniedByUser:
+ raise _SkipUpgrade() from None
+
+
+def upgrade_if_needed():
+ if os.geteuid() != 0:
+ return
+ if not crmsh.utils.can_ask(background_wait=False):
+ return
+ nodes = crmsh.utils.list_cluster_nodes(no_reg=True)
+ if nodes is not None and len(nodes) > 1 \
+ and _is_upgrade_needed(nodes):
+ logger.debug("upgradeutil: configuration fix needed")
+ try:
+ if not _is_cluster_target_seq_consistent(nodes):
+ logger.warning("crmsh configuration is inconsistent in cluster.")
+ raise _SkipUpgrade()
+ seq = _get_minimal_seq_in_cluster(nodes)
+ logger.debug(
+ "Upgrading crmsh from seq %s to %s.",
+ seq, _format_upgrade_seq(CURRENT_UPGRADE_SEQ),
+ )
+ _upgrade(nodes, seq)
+ except _SkipUpgrade:
+ logger.debug("upgradeutil: configuration fix skipped")
+ return
+ crmsh.parallax.parallax_call(
+ nodes,
+ "mkdir -p '{}' && echo '{}' > '{}'".format(
+ DATA_DIR,
+ _format_upgrade_seq(CURRENT_UPGRADE_SEQ),
+ SEQ_FILE_PATH,
+ ),
+ )
+ crmsh.parallax.parallax_call(nodes, 'rm -f {}'.format(FORCE_UPGRADE_FILE_PATH))
+ logger.debug("configuration fix finished")
+
+
+def force_set_local_upgrade_seq():
+ """Create the upgrade sequence file and set it to CURRENT_UPGRADE_SEQ.
+
+ It should only be used when initializing new cluster nodes."""
+ if not os.path.exists(DATA_DIR):
+ crmsh.utils.mkdirs_owned(DATA_DIR, mode=0o755, uid='root', gid='root')
+ up_seq = _format_upgrade_seq(CURRENT_UPGRADE_SEQ)
+ crmsh.utils.str2file(up_seq, SEQ_FILE_PATH)
+
+
+def main():
+ if len(sys.argv) > 1 and sys.argv[1] == 'get-seq':
+ print(_format_upgrade_seq(CURRENT_UPGRADE_SEQ))
+
+
+if __name__ == '__main__':
+ main()
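
The sequence helpers round-trip as follows; callers above treat malformed input as (0, 0):

    _parse_upgrade_seq(b'1.0')       # -> (1, 0)
    _format_upgrade_seq((1, 0))      # -> '1.0'
    _parse_upgrade_seq(b'garbage')   # raises ValueError
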
diff --git a/crmsh/user_of_host.py b/crmsh/user_of_host.py
new file mode 100644
index 0000000..4551dce
--- /dev/null
+++ b/crmsh/user_of_host.py
@@ -0,0 +1,122 @@
+import logging
+import socket
+import subprocess
+import typing
+
+from . import config
+from . import constants
+from . import userdir
+from .pyshim import cache
+
+
+logger = logging.getLogger(__name__)
+
+
+class UserNotFoundError(ValueError):
+ pass
+
+
+class UserOfHost:
+ @staticmethod
+ def instance():
+ return _user_of_host_instance
+
+ @staticmethod
+ @cache
+ def this_node():
+ return socket.gethostname()
+
+ def __init__(self):
+ self._user_cache = dict()
+ self._user_pair_cache = dict()
+
+ def user_of(self, host):
+ cached = self._user_cache.get(host)
+ if cached is None:
+ ret = self._get_user_of_host_from_config(host)
+ if ret is None:
+ raise UserNotFoundError()
+ else:
+ self._user_cache[host] = ret
+ return ret
+ else:
+ return cached
+
+ def user_pair_for_ssh(self, host: str) -> typing.Tuple[str, str]:
+ """Return (local_user, remote_user) pair for ssh connection"""
+ local_user = None
+ remote_user = None
+ try:
+ local_user = 'root' if self.use_ssh_agent() else self.user_of(self.this_node())
+ remote_user = self.user_of(host)
+ return local_user, remote_user
+ except UserNotFoundError:
+ cached = self._user_pair_cache.get(host)
+ if cached is None:
+ if local_user is not None:
+ ret = local_user, local_user
+ self._user_pair_cache[host] = ret
+ return ret
+ else:
+ ret = self._guess_user_for_ssh(host)
+ if ret is None:
+ raise UserNotFoundError
+ else:
+ self._user_pair_cache[host] = ret
+ return ret
+ else:
+ return cached
+
+ @staticmethod
+ def use_ssh_agent() -> bool:
+ return config.get_option('core', 'no_generating_ssh_key')
+
+ @staticmethod
+ def _get_user_of_host_from_config(host):
+ try:
+ canonical, aliases, _ = socket.gethostbyaddr(host)
+ aliases = set(aliases)
+ aliases.add(canonical)
+ aliases.add(host)
+ except (socket.herror, socket.gaierror):
+ aliases = {host}
+ hosts = config.get_option('core', 'hosts')
+ if hosts == ['']:
+ return None
+ for item in hosts:
+ if item.find('@') != -1:
+ user, node = item.split('@')
+ else:
+ user = userdir.getuser()
+ node = item
+ if node in aliases:
+ return user
+ logger.debug('Failed to get the user of host %s (aliases: %s). Known hosts are %s', host, aliases, hosts)
+ return None
+
+ @staticmethod
+ def _guess_user_for_ssh(host: str) -> typing.Optional[typing.Tuple[str, str]]:
+ args = ['ssh']
+ args.extend(constants.SSH_OPTION_ARGS)
+ if userdir.get_sudoer():
+ args.extend(['-o', 'BatchMode=yes', host, 'sudo', 'true'])
+ else:
+ args.extend(['-o', 'BatchMode=yes', host, 'true'])
+ rc = subprocess.call(
+ args,
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+ if rc == 0:
+ user = userdir.getuser()
+ return user, user
+ else:
+ return None
+
+
+_user_of_host_instance = UserOfHost()
+
+
+def instance():
+ return _user_of_host_instance
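
A hedged usage sketch; the node name is hypothetical:

    uoh = instance()
    try:
        local_user, remote_user = uoh.user_pair_for_ssh('node2')
    except UserNotFoundError:
        pass   # no core.hosts entry and the BatchMode ssh probe failed
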
diff --git a/crmsh/userdir.py b/crmsh/userdir.py
new file mode 100644
index 0000000..d2df4f7
--- /dev/null
+++ b/crmsh/userdir.py
@@ -0,0 +1,74 @@
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import os
+import typing
+import pwd
+
+from . import log
+
+
+logger = log.setup_logger(__name__)
+
+
+def getuser():
+ "Returns the name of the current effective user"
+ effective_uid = os.geteuid()
+ user_info = pwd.getpwuid(effective_uid)
+ return user_info.pw_name
+
+
+def get_sudoer() -> typing.Optional[str]:
+ return os.environ.get('SUDO_USER')
+
+
+def gethomedir(user=''):
+ return os.path.expanduser("~" + user)
+
+
+# see http://standards.freedesktop.org/basedir-spec
+CONFIG_HOME = os.path.join(os.path.expanduser("~/.config"), 'crm')
+CACHE_HOME = os.path.join(os.path.expanduser("~/.cache"), 'crm')
+try:
+ from xdg import BaseDirectory
+ CONFIG_HOME = os.path.join(BaseDirectory.xdg_config_home, 'crm')
+ CACHE_HOME = os.path.join(BaseDirectory.xdg_cache_home, 'crm')
+except ImportError:
+ pass
+
+# TODO: move to CONFIG_HOME
+HISTORY_FILE = os.path.expanduser("~/.crm_history")
+RC_FILE = os.path.expanduser("~/.crm.rc")
+CRMCONF_DIR = os.path.expanduser("~/.crmconf")
+
+GRAPHVIZ_USER_FILE = os.path.join(CONFIG_HOME, "graphviz")
+
+
+def mv_user_files():
+ '''
+ Called from main
+ '''
+ global HISTORY_FILE
+ global RC_FILE
+ global CRMCONF_DIR
+
+ def _xdg_file(name, xdg_name, chk_fun, directory):
+ if not name:
+ return name
+ os.makedirs(directory, 0o700, exist_ok=True)
+ # FileExistsError will be raised if `directory` exists and it is not a directory
+ new = os.path.join(directory, xdg_name)
+ if directory == CONFIG_HOME and chk_fun(new) and chk_fun(name):
+ logger.warning("both %s and %s exist, please cleanup", name, new)
+ return name
+ if chk_fun(name):
+ if directory == CONFIG_HOME:
+ logger.info("moving %s to %s", name, new)
+ else:
+ logger.debug("moving %s to %s", name, new)
+ os.rename(name, new)
+ return new
+
+ HISTORY_FILE = _xdg_file(HISTORY_FILE, "history", os.path.isfile, CACHE_HOME)
+ RC_FILE = _xdg_file(RC_FILE, "rc", os.path.isfile, CONFIG_HOME)
+ CRMCONF_DIR = _xdg_file(CRMCONF_DIR, "crmconf", os.path.isdir, CONFIG_HOME)
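
After mv_user_files() runs, the module-level paths point at the XDG locations; a sketch assuming a default environment in which the legacy files existed and were moved:

    mv_user_files()
    HISTORY_FILE   # e.g. ~/.cache/crm/history
    RC_FILE        # e.g. ~/.config/crm/rc
    CRMCONF_DIR    # e.g. ~/.config/crm/crmconf
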
diff --git a/crmsh/utils.py b/crmsh/utils.py
new file mode 100644
index 0000000..f35f088
--- /dev/null
+++ b/crmsh/utils.py
@@ -0,0 +1,3150 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+import errno
+import os
+import socket
+import sys
+import typing
+from tempfile import mkstemp
+import subprocess
+import re
+import glob
+import time
+import datetime
+import shutil
+import shlex
+import bz2
+import fnmatch
+import gc
+import ipaddress
+import argparse
+import random
+import string
+import grp
+import gzip
+import lzma
+from pathlib import Path
+from contextlib import contextmanager, closing
+from stat import S_ISBLK
+from lxml import etree
+
+import crmsh.parallax
+import crmsh.user_of_host
+from . import config, sh
+from . import userdir
+from . import constants
+from . import options
+from . import term
+from distutils.version import LooseVersion
+from .constants import SSH_OPTION
+from . import log
+from .prun import prun
+from .sh import ShellUtils
+from .service_manager import ServiceManager
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+
+
+class TerminateSubCommand(Exception):
+ """
+ Raised to abort the current subcommand on error while keeping the interactive shell running.
+ """
+
+
+def to_ascii(input_str):
+ """Convert the bytes string to a ASCII string
+ Usefull to remove accent (diacritics)"""
+ if input_str is None:
+ return input_str
+ if isinstance(input_str, str):
+ return input_str
+ try:
+ return str(input_str, 'utf-8')
+ except UnicodeDecodeError:
+ if config.core.debug or options.regression_tests:
+ import traceback
+ traceback.print_exc()
+ return input_str.decode('utf-8', errors='ignore')
+
+
+def filter_keys(key_list, args, sign="="):
+ """Return list item which not be completed yet"""
+ return [s+sign for s in key_list if any_startswith(args, s+sign) is None]
+
+
+def any_startswith(iterable, prefix):
+ """Return first element in iterable which startswith prefix, or None."""
+ for element in iterable:
+ if element.startswith(prefix):
+ return element
+ return None
+
+
+def rindex(iterable, value):
+ return len(iterable) - iterable[::-1].index(value) - 1
+
+
+def memoize(function):
+ "Decorator to invoke a function once only for any argument"
+ memoized = {}
+
+ def inner(*args):
+ if args in memoized:
+ return memoized[args]
+ r = function(*args)
+ memoized[args] = r
+ return r
+ return inner
+
+
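
A tiny illustration of memoize:

    @memoize
    def double(x):
        print('computing', x)
        return 2 * x

    double(21)   # prints 'computing 21' and returns 42
    double(21)   # cached: returns 42 without printing
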
+@contextmanager
+def nogc():
+ gc.disable()
+ try:
+ yield
+ finally:
+ gc.enable()
+
+
+getuser = userdir.getuser
+gethomedir = userdir.gethomedir
+
+
+def user_of(host):
+ return crmsh.user_of_host.instance().user_of(host)
+
+
+def user_pair_for_ssh(host):
+ try:
+ return crmsh.user_of_host.instance().user_pair_for_ssh(host)
+ except crmsh.user_of_host.UserNotFoundError:
+ raise ValueError('Cannot create an ssh session from {} to {}.'.format(this_node(), host))
+
+
+def ssh_copy_id_no_raise(local_user, remote_user, remote_node):
+ if check_ssh_passwd_need(local_user, remote_user, remote_node):
+ logger.info("Configuring SSH passwordless with {}@{}".format(remote_user, remote_node))
+ cmd = "ssh-copy-id -i ~/.ssh/id_rsa.pub '{}@{}' &> /dev/null".format(remote_user, remote_node)
+ result = sh.LocalShell().su_subprocess_run(local_user, cmd, tty=True)
+ return result.returncode
+ else:
+ return 0
+
+
+def ssh_copy_id(local_user, remote_user, remote_node):
+ if 0 != ssh_copy_id_no_raise(local_user, remote_user, remote_node):
+ fatal("Failed to login to remote host {}@{}".format(remote_user, remote_node))
+
+
+@memoize
+def this_node():
+ 'returns name of this node (hostname)'
+ return os.uname()[1]
+
+
+_cib_shadow = 'CIB_shadow'
+_cib_in_use = ''
+
+
+def set_cib_in_use(name):
+ os.putenv(_cib_shadow, name)
+ global _cib_in_use
+ _cib_in_use = name
+
+
+def clear_cib_in_use():
+ os.unsetenv(_cib_shadow)
+ global _cib_in_use
+ _cib_in_use = ''
+
+
+def get_cib_in_use():
+ return _cib_in_use
+
+
+def get_tempdir():
+ return os.getenv("TMPDIR") or "/tmp"
+
+
+def is_program(prog):
+ """Is this program available?"""
+ def isexec(filename):
+ return os.path.isfile(filename) and os.access(filename, os.X_OK)
+ for p in os.getenv("PATH").split(os.pathsep):
+ f = os.path.join(p, prog)
+ if isexec(f):
+ return f
+ return None
+
+
+def pacemaker_20_daemon(new, old):
+ "helper to discover renamed pacemaker daemons"
+ if is_program(new):
+ return new
+ return old
+
+
+@memoize
+def pacemaker_attrd():
+ return pacemaker_20_daemon("pacemaker-attrd", "attrd")
+
+
+@memoize
+def pacemaker_based():
+ return pacemaker_20_daemon("pacemaker-based", "cib")
+
+
+@memoize
+def pacemaker_controld():
+ return pacemaker_20_daemon("pacemaker-controld", "crmd")
+
+
+@memoize
+def pacemaker_execd():
+ return pacemaker_20_daemon("pacemaker-execd", "lrmd")
+
+
+@memoize
+def pacemaker_fenced():
+ return pacemaker_20_daemon("pacemaker-fenced", "stonithd")
+
+
+@memoize
+def pacemaker_remoted():
+ return pacemaker_20_daemon("pacemaker-remoted", "pacemaker_remoted")
+
+
+@memoize
+def pacemaker_schedulerd():
+ return pacemaker_20_daemon("pacemaker-schedulerd", "pengine")
+
+
+def pacemaker_daemon(name):
+ if name == "attrd" or name == "pacemaker-attrd":
+ return pacemaker_attrd()
+ if name == "cib" or name == "pacemaker-based":
+ return pacemaker_based()
+ if name == "crmd" or name == "pacemaker-controld":
+ return pacemaker_controld()
+ if name == "lrmd" or name == "pacemaker-execd":
+ return pacemaker_execd()
+ if name == "stonithd" or name == "pacemaker-fenced":
+ return pacemaker_fenced()
+ if name == "pacemaker_remoted" or name == "pacemeaker-remoted":
+ return pacemaker_remoted()
+ if name == "pengine" or name == "pacemaker-schedulerd":
+ return pacemaker_schedulerd()
+ raise ValueError("Not a Pacemaker daemon name: {}".format(name))
+
+
+def can_ask(background_wait=True):
+ """
+ Is user-interactivity possible?
+ Checks if connected to a TTY.
+ """
+ can_ask = (not options.ask_no) and sys.stdin.isatty()
+ if not background_wait:
+ try:
+ can_ask = can_ask and os.tcgetpgrp(sys.stdin.fileno()) == os.getpgrp()
+ except OSError as e:
+ if e.errno == errno.ENOTTY:
+ can_ask = False
+ return can_ask
+
+
+def ask(msg, background_wait=True):
+ """Ask for user confirmation.
+
+ Parameters:
+ * background_wait: When set to False, return False without asking if current process is in background. Otherwise,
+ block until the process is brought to foreground.
+
+ Global Options:
+ * core.force: always return true without asking
+ * options.ask_no: do not ask and return false
+ """
+ if config.core.force:
+ logger.info("%s [YES]", msg)
+ return True
+ if not can_ask(background_wait):
+ return False
+
+ msg += ' '
+ if msg.endswith('? '):
+ msg = msg[:-2] + ' (y/n)? '
+
+ while True:
+ try:
+ ans = input(msg)
+ except EOFError:
+ ans = 'n'
+ if ans:
+ ans = ans[0].lower()
+ if ans in 'yn':
+ return ans == 'y'
+
+
+def ask_for_choice(question: str, choices: typing.List[str], default: int = None, background_wait=True, yes_to_all=False) -> int:
+ msg = '{} ({})? '.format(question, '/'.join((choice if i != default else '[{}]'.format(choice) for i, choice in enumerate(choices))))
+ if yes_to_all and default is not None:
+ logger.info('%s %s', msg, choices[default])
+ return default
+ if not can_ask(background_wait):
+ if default is None:
+ fatal("User input is impossible in a non-interactive session.")
+ else:
+ logger.info('%s %s', msg, choices[default])
+ return default
+ while True:
+ try:
+ choice = input(msg)
+ except EOFError:
+ choice = ''
+ if choice == '':
+ if default is not None:
+ return default
+ else:
+ for i, x in enumerate(choices):
+ if choice == x:
+ return i
+
+
+# holds part of line before \ split
+# for a multi-line input
+_LINE_BUFFER = ''
+
+
+def get_line_buffer():
+ return _LINE_BUFFER
+
+
+def multi_input(prompt=''):
+ """
+ Get input from user
+ Allow multiple lines using a continuation character
+ """
+ global _LINE_BUFFER
+ line = []
+ _LINE_BUFFER = ''
+ while True:
+ try:
+ text = input(prompt)
+ except EOFError:
+ return None
+ if options.regression_tests:
+ logger_utils.incr_lineno()
+ print(".INP:", text)
+ sys.stdout.flush()
+ sys.stderr.flush()
+ stripped = text.strip()
+ if stripped.endswith('\\'):
+ stripped = stripped.rstrip('\\')
+ line.append(stripped)
+ _LINE_BUFFER += stripped
+ if prompt:
+ prompt = ' > '
+ else:
+ line.append(stripped)
+ break
+ return ''.join(line)
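+
+# Illustrative session (a trailing backslash continues the line; the
+# stripped pieces are concatenated):
+#   crm(live)# node attribute node1 \
+#    > set foo bar
+#   -> "node attribute node1 set foo bar"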
+
+
+def verify_boolean(opt):
+ return opt.lower() in ("yes", "true", "on", "1") or \
+ opt.lower() in ("no", "false", "off", "0")
+
+
+def is_boolean_true(opt):
+ if opt in (None, False):
+ return False
+ if opt is True:
+ return True
+ return opt.lower() in ("yes", "true", "on", "1")
+
+
+def is_boolean_false(opt):
+ if opt in (None, False):
+ return True
+ if opt is True:
+ return False
+ return opt.lower() in ("no", "false", "off", "0")
+
+
+def get_boolean(opt, dflt=False):
+ if not opt:
+ return dflt
+ return is_boolean_true(opt)
+
+
+def canonical_boolean(opt):
+ return 'true' if is_boolean_true(opt) else 'false'
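+
+# Truth-table sketch for the boolean helpers:
+#   is_boolean_true("Yes")        -> True
+#   is_boolean_false("off")       -> True
+#   get_boolean(None, dflt=True)  -> True
+#   canonical_boolean("1")        -> 'true'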
+
+
+def keyword_cmp(string1, string2):
+ return string1.lower() == string2.lower()
+
+
+class olist(list):
+ """
+ Implements the 'in' operator
+ in a case-insensitive manner,
+ allowing "if x in olist(...)"
+ """
+ def __init__(self, keys):
+ super(olist, self).__init__([k.lower() for k in keys])
+
+ def __contains__(self, key):
+ return super(olist, self).__contains__(key.lower())
+
+ def append(self, key):
+ super(olist, self).append(key.lower())
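+
+# Example: membership tests are case-insensitive:
+#   "Stop" in olist(["start", "stop"])  -> True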
+
+
+def os_types_list(path):
+ l = []
+ for f in glob.glob(path):
+ if os.access(f, os.X_OK) and os.path.isfile(f):
+ a = f.split("/")
+ l.append(a[-1])
+ return l
+
+
+def listtemplates():
+ l = []
+ templates_dir = os.path.join(config.path.sharedir, 'templates')
+ for f in os.listdir(templates_dir):
+ if os.path.isfile("%s/%s" % (templates_dir, f)):
+ l.append(f)
+ return l
+
+
+def listconfigs():
+ l = []
+ for f in os.listdir(userdir.CRMCONF_DIR):
+ if os.path.isfile("%s/%s" % (userdir.CRMCONF_DIR, f)):
+ l.append(f)
+ return l
+
+
+def add_sudo(cmd):
+ if config.core.user:
+ return "sudo -E -u %s %s" % (config.core.user, cmd)
+ return cmd
+
+
+def chown(path, user, group):
+ if isinstance(user, int):
+ uid = user
+ else:
+ import pwd
+ uid = pwd.getpwnam(user).pw_uid
+ if isinstance(group, int):
+ gid = group
+ else:
+ import grp
+ gid = grp.getgrnam(group).gr_gid
+ try:
+ os.chown(path, uid, gid)
+ except os.error as err:
+ cmd = "sudo chown {}:{} {}".format(user, group, path)
+ rc, out, err = ShellUtils().get_stdout_stderr(cmd, no_reg=True)
+ if rc != 0:
+ fatal("Failed to chown {}: {}".format(path, err))
+
+
+def chmod(path, mod):
+ try:
+ os.chmod(path, mod)
+ except os.error as err:
+ cmd = "sudo chmod {} {}".format(format(mod,'o'), path)
+ rc, out, err = ShellUtils().get_stdout_stderr(cmd, no_reg=True)
+ if rc != 0:
+ fatal("Failed to chmod {}: {}".format(path, err))
+
+
+def touch(file_name):
+ rc, out, err = ShellUtils().get_stdout_stderr("touch " + file_name, no_reg=True)
+ if rc != 0:
+ rc, out, err = ShellUtils().get_stdout_stderr("sudo touch " + file_name, no_reg=True)
+ if rc != 0:
+ fatal("Failed create file {}: {}".format(file_name, err))
+
+
+def copy_local_file(src, dest):
+ try:
+ shutil.copyfile(src, dest)
+ except os.error as err:
+ if err.errno not in (errno.EPERM, errno.EACCES):
+ raise
+ rc, out, err = ShellUtils().get_stdout_stderr("sudo cp {} {}".format(src, dest), no_reg=True)
+ if rc != 0:
+ fatal("Failed to copy file from {} to {}: {}".format(src, dest, err))
+ cmd = "sudo chown {}:{} {}".format(userdir.getuser(), "haclient", dest)
+ rc, out, err = ShellUtils().get_stdout_stderr(cmd, no_reg=True)
+ if rc != 0:
+ fatal("Failed to chown {}: {}".format(dest, err))
+
+
+def rmfile(path, ignore_errors=False):
+ """
+ Try to remove the given file, and
+ report an error on failure
+ """
+ try:
+ os.remove(path)
+ except os.error as err:
+ if err.errno in (errno.EPERM, errno.EACCES):
+ rc, out, err = ShellUtils().get_stdout_stderr("sudo rm " + path, no_reg=True)
+ if rc != 0 and not ignore_errors:
+ fatal("Failed to remove {}: {}".format(path, err))
+ elif not ignore_errors:
+ raise
+
+
+def ensure_sudo_readable(f):
+ # make sure the tempfile is readable to crm_diff (bsc#999683)
+ if config.core.user:
+ from pwd import getpwnam
+ uid = getpwnam(config.core.user).pw_uid
+ try:
+ os.chown(f, uid, -1)
+ except os.error as err:
+ logger.error('Failed setting temporary file permissions: %s', err)
+ return False
+ return True
+
+
+def pipe_string(cmd, s):
+ rc = -1 # command failed
+ cmd = add_sudo(cmd)
+ logger.debug("piping string to %s", cmd)
+ if options.regression_tests:
+ print(".EXT", cmd)
+ p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
+ try:
+ # communicate() expects encoded bytes
+ if isinstance(s, str):
+ s = s.encode('utf-8')
+ p.communicate(s)
+ p.wait()
+ rc = p.returncode
+ except IOError as msg:
+ if "Broken pipe" not in str(msg):
+ logger.error(msg)
+ return rc
+
+
+def filter_string(cmd, s, stderr_on=True, shell=True):
+ rc = -1 # command failed
+ outp = ''
+ if stderr_on is True:
+ stderr = None
+ else:
+ stderr = subprocess.PIPE
+ cmd = add_sudo(cmd)
+ logger.debug("pipe through %s", cmd)
+ if options.regression_tests:
+ print(".EXT", cmd)
+ p = subprocess.Popen(cmd,
+ shell=shell,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=stderr)
+ try:
+ # bytes expected here
+ if isinstance(s, str):
+ s = s.encode('utf-8')
+ ret = p.communicate(s)
+ if stderr_on == 'stdout':
+ outp = b"\n".join(ret)
+ else:
+ outp = ret[0]
+ p.wait()
+ rc = p.returncode
+ except OSError as err:
+ # note: os.errno was removed in Python 3.7; use the errno module
+ if err.errno != errno.EPIPE:
+ logger.error(err.strerror)
+ logger.error("from: %s", cmd)
+ except Exception as msg:
+ logger.error("from: %s: %s", cmd, str(msg))
+ return rc, to_ascii(outp)
+
+
+def str2tmp(_str, suffix=".pcmk"):
+ '''
+ Write the given string to a temporary file. Return the name
+ of the file.
+ '''
+ s = to_ascii(_str)
+ fd, tmp = mkstemp(suffix=suffix)
+ try:
+ f = os.fdopen(fd, "w")
+ except IOError as msg:
+ logger.error(msg)
+ return
+ f.write(s)
+ if not s.endswith('\n'):
+ f.write("\n")
+ f.close()
+ return tmp
+
+
+@contextmanager
+def create_tempfile(suffix='', dir=None):
+ """ Context for temporary file.
+
+ Will find a free temporary filename upon entering
+ and will try to delete the file on leaving, even in case of an exception.
+
+ Parameters
+ ----------
+ suffix : string
+ optional file suffix
+ dir : string
+ optional directory to save temporary file in
+
+ (from http://stackoverflow.com/a/29491523)
+ """
+ import tempfile
+ tf = tempfile.NamedTemporaryFile(delete=False, suffix=suffix, dir=dir)
+ tf.file.close()
+ try:
+ yield tf.name
+ finally:
+ try:
+ os.remove(tf.name)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ pass
+ else:
+ raise
+
+
+@contextmanager
+def open_atomic(filepath, mode="r", buffering=-1, fsync=False, encoding=None):
+ """ Open temporary file object that atomically moves to destination upon
+ exiting.
+
+ Allows reading and writing to and from the same filename.
+
+ The file will not be moved to destination in case of an exception.
+
+ Parameters
+ ----------
+ filepath : string
+ the file path to be opened
+ fsync : bool
+ whether to force write the file to disk
+
+ (from http://stackoverflow.com/a/29491523)
+ """
+
+ with create_tempfile(dir=os.path.dirname(os.path.abspath(filepath))) as tmppath:
+ with open(tmppath, mode, buffering, encoding=encoding) as file:
+ try:
+ yield file
+ finally:
+ if fsync:
+ file.flush()
+ os.fsync(file.fileno())
+ os.rename(tmppath, filepath)
+
+
+def str2file(s, fname, mod=0o644):
+ '''
+ Write a string to a file.
+ '''
+ try:
+ with open_atomic(fname, 'w', encoding='utf-8', fsync=True) as dst:
+ dst.write(to_ascii(s))
+ os.chmod(fname, mod)
+ except IOError as msg:
+ # If we failed under current user, repeat under root
+ escaped = s.translate(str.maketrans({'"': r'\"'})) # other symbols are already escaped
+ cmd = 'printf "{}" | sudo tee {} >/dev/null'.format(escaped, fname)
+ cmd += ' && sudo chmod {} {}'.format(format(mod,'o'), fname)
+ rc, out, err = ShellUtils().get_stdout_stderr(cmd, no_reg=True)
+ if rc != 0:
+ #raise ValueError("Failed to write to {}: {}".format(s, err)) # fatal?
+ logger.error(err)
+ return False
+ return True
+
+def write_remote_file(text, tofile, user, remote):
+ shell_script = f'''cat >> {tofile} << EOF
+{text}
+EOF
+'''
+ result = sh.cluster_shell().subprocess_run_without_input(
+ remote,
+ user,
+ shell_script,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.PIPE,
+ )
+ if result.returncode != 0:
+ raise ValueError("Failed to write to {}@{}/{}: {}".format(user, remote, tofile, result.stdout.decode('utf-8')))
+
+
+def copy_remote_textfile(remote_user, remote_node, remote_text_file, local_path):
+ """
+ scp might lack permissions to copy the file for a non-root user,
+ and root login might be disabled (PermitRootLogin no).
+ So let's run: ssh alice@node1 sudo cat <file-name>
+ and save the output locally.
+ """
+ # First try an easy way
+ cmd = "scp %s@%s:'%s' %s" % (remote_user, remote_node, remote_text_file, local_path)
+ rc, out, err = get_stdout_stderr_as_local_sudoer(cmd)
+ # If failed, try the hard way
+ if rc != 0:
+ rc, out, err = sh.cluster_shell().get_rc_stdout_stderr_without_input(remote_node, 'cat {}'.format(remote_text_file))
+ if rc != 0:
+ raise ValueError("Failed to read {}@{}/{}: {}".format(remote_user, remote_node, remote_text_file, err))
+ full_path = os.path.join(local_path, os.path.basename(remote_text_file))
+ str2file(out, full_path)
+
+def file2str(fname, noerr=True):
+ '''
+ Read a one line file into a string, strip whitespace around.
+ '''
+ try:
+ f = open(fname, "r")
+ except IOError as msg:
+ if not noerr:
+ logger.error(msg)
+ return None
+ s = f.readline()
+ f.close()
+ return s.strip()
+
+
+def safe_open_w(fname):
+ if fname == "-":
+ f = sys.stdout
+ else:
+ if not options.batch and os.access(fname, os.F_OK):
+ if not ask("File %s exists. Do you want to overwrite it?" % fname):
+ return None
+ try:
+ f = open(fname, "w")
+ except IOError as msg:
+ logger.error(msg)
+ return None
+ return f
+
+
+def safe_close_w(f):
+ if f and f != sys.stdout:
+ f.close()
+
+
+def is_path_sane(name):
+ if re.search(r"['`#*?$\[\];]", name):
+ logger.error("%s: bad path", name)
+ return False
+ return True
+
+
+def is_filename_sane(name):
+ if re.search(r"['`/#*?$\[\];]", name):
+ logger.error("%s: bad filename", name)
+ return False
+ return True
+
+
+def is_name_sane(name):
+ if re.search("[']", name):
+ logger.error("%s: bad name", name)
+ return False
+ return True
+
+
+def show_dot_graph(dotfile, keep_file=False, desc="transition graph"):
+ cmd = "%s %s" % (config.core.dotty, dotfile)
+ if not keep_file:
+ cmd = "(%s; rm -f %s)" % (cmd, dotfile)
+ if options.regression_tests:
+ print(".EXT", cmd)
+ subprocess.Popen(cmd, shell=True, bufsize=0,
+ stdin=None, stdout=None, stderr=None, close_fds=True)
+ logger.info("starting %s to show %s", config.core.dotty, desc)
+
+
+def ext_cmd(cmd, shell=True):
+ cmd = add_sudo(cmd)
+ if options.regression_tests:
+ print(".EXT", cmd)
+ logger.debug("invoke: %s", cmd)
+ return subprocess.call(cmd, shell=shell)
+
+
+def ext_cmd_nosudo(cmd, shell=True):
+ if options.regression_tests:
+ print(".EXT", cmd)
+ return subprocess.call(cmd, shell=shell)
+
+
+def rmdir_r(d):
+ # TODO: Make sure we're not deleting something we shouldn't!
+ if d and os.path.isdir(d):
+ shutil.rmtree(d)
+
+
+def nvpairs2dict(pairs):
+ '''
+ takes a list of string of form ['a=b', 'c=d']
+ and returns {'a':'b', 'c':'d'}
+ '''
+ data = []
+ for var in pairs:
+ if '=' in var:
+ data.append(var.split('=', 1))
+ else:
+ data.append([var, None])
+ return dict(data)
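+
+# Examples:
+#   nvpairs2dict(['a=b', 'c=d'])  -> {'a': 'b', 'c': 'd'}
+#   nvpairs2dict(['standalone'])  -> {'standalone': None}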
+
+
+def is_check_always():
+ '''
+ Even though the frequency may be set to always, it doesn't
+ make sense to do that with non-interactive sessions.
+ '''
+ return options.interactive and config.core.check_frequency == "always"
+
+
+def get_check_rc():
+ '''
+ If the check mode is set to strict, then on errors we
+ return 2 which is the code for error. Otherwise, we
+ pretend that errors are warnings.
+ '''
+ return 2 if config.core.check_mode == "strict" else 1
+
+
+_LOCKDIR = ".lockdir"
+_PIDF = "pid"
+
+
+def check_locker(lockdir):
+ if not os.path.isdir(os.path.join(lockdir, _LOCKDIR)):
+ return
+ s = file2str(os.path.join(lockdir, _LOCKDIR, _PIDF))
+ pid = convert2ints(s)
+ if not isinstance(pid, int):
+ logger.warning("history: removing malformed lock")
+ rmdir_r(os.path.join(lockdir, _LOCKDIR))
+ return
+ try:
+ os.kill(pid, 0)
+ except OSError as err:
+ if err.errno == errno.ESRCH:
+ logger.info("history: removing stale lock")
+ rmdir_r(os.path.join(lockdir, _LOCKDIR))
+ else:
+ logger.error("%s: %s", _LOCKDIR, err.strerror)
+
+
+@contextmanager
+def lock(lockdir):
+ """
+ Ensure that the lock is released properly
+ even in the face of an exception between
+ acquire and release.
+ """
+ def acquire_lock():
+ check_locker(lockdir)
+ while True:
+ try:
+ os.makedirs(os.path.join(lockdir, _LOCKDIR))
+ str2file("%d" % os.getpid(), os.path.join(lockdir, _LOCKDIR, _PIDF))
+ return True
+ except OSError as err:
+ if err.errno != errno.EEXIST:
+ logger.error("Failed to acquire lock to %s: %s", lockdir, err.strerror)
+ return False
+ time.sleep(0.1)
+ continue
+ else:
+ return False
+
+ has_lock = acquire_lock()
+ try:
+ yield
+ finally:
+ if has_lock:
+ rmdir_r(os.path.join(lockdir, _LOCKDIR))
+
+
+def mkdirp(directory, mode=0o777, parents=True, exist_ok=True):
+ """
+ Same behavior as the POSIX mkdir -p command
+ """
+ Path(directory).mkdir(mode, parents, exist_ok)
+
+def mkdirs_owned(dirs, mode=0o777, uid=-1, gid=-1):
+ """
+ Create a directory path, setting the mode and
+ ownership of the leaf directory to mode/uid/gid.
+ It won't fail if the directory already exists (exist_ok=True).
+ """
+ if not os.path.exists(dirs):
+ try:
+ os.makedirs(dirs, mode)
+ except OSError as err:
+ # If we failed under current user, repeat under root
+ cmd = "sudo mkdir {}".format(dirs)
+ cmd += " && sudo chmod {} {}".format(format(mode,'o'), dirs)
+ if gid == -1:
+ gid = "haclient"
+ if uid == -1:
+ uid = userdir.getuser()
+ cmd += " && sudo chown {} {}".format(uid, dirs)
+ cmd += " && sudo chgrp {} {}".format(gid, dirs)
+ rc, out, err = ShellUtils().get_stdout_stderr(cmd, no_reg=True)
+ if rc != 0:
+ fatal("Failed to create {}: {}".format(' '.join(dirs), err))
+ return
+ if uid != -1 or gid != -1:
+ chown(dirs, uid, gid)
+
+def pipe_cmd_nosudo(cmd):
+ if options.regression_tests:
+ print(".EXT", cmd)
+ proc = subprocess.Popen(cmd,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ (outp, err_outp) = proc.communicate()
+ proc.wait()
+ rc = proc.returncode
+ if rc != 0:
+ print(outp)
+ print(err_outp)
+ return rc
+
+
+def get_stdout_stderr_as_local_sudoer(cmd, input_s=None):
+ try:
+ user = user_of(this_node())
+ except crmsh.user_of_host.UserNotFoundError:
+ user = 'root'
+ return sh.LocalShell().get_rc_stdout_stderr(user, cmd, input_s)
+
+
+def stdout2list(cmd, stderr_on=True, shell=True):
+ '''
+ Run a cmd, fetch output, return it as a list of lines.
+ stderr_on controls whether to show output which comes on stderr.
+ '''
+ rc, s = ShellUtils().get_stdout(add_sudo(cmd), stderr_on=stderr_on, shell=shell)
+ if not s:
+ return rc, []
+ return rc, s.split('\n')
+
+
+def append_file(dest, src):
+ 'Append src to dest'
+ try:
+ open(dest, "a").write(open(src).read())
+ return True
+ except IOError as msg:
+ logger.error("append %s to %s: %s", src, dest, msg)
+ return False
+
+
+def get_dc(peer=None):
+ cmd = "crmadmin -D -t 1"
+ _, out, _ = sh.cluster_shell().get_rc_stdout_stderr_without_input(peer, cmd)
+ if not out:
+ return None
+ if not out.startswith("Designated"):
+ return None
+ return out.split()[-1]
+
+
+def wait4dc(what="", show_progress=True):
+ '''
+ Wait for the DC to get into the S_IDLE state. This should be
+ invoked only after a CIB modification which would exercise
+ the PE. Parameter "what" is whatever the caller wants to be
+ printed if showing progress.
+
+ It is assumed that the DC is already in a different state,
+ usually it should be either PENGINE or TRANSITION. This
+ assumption may not be true, but there's a high chance that it
+ is since crmd should be faster to move through states than
+ this shell.
+
+ Further, it may also be that crmd already calculated the new
+ graph, did transition, and went back to the idle state. This
+ may in particular be the case if the transition turned out to
+ be empty.
+
+ Tricky. Though in practice it shouldn't be an issue.
+
+ There's no timeout, as we expect the DC to eventually become
+ idle.
+ '''
+ dc = get_dc()
+ if not dc:
+ logger.warning("can't find DC")
+ return False
+ cmd = "crm_attribute -Gq -t crm_config -n crmd-transition-delay 2> /dev/null"
+ delay = ShellUtils().get_stdout(add_sudo(cmd))[1]
+ if delay:
+ delaymsec = crm_msec(delay)
+ if delaymsec > 0:
+ logger.info("The crmd-transition-delay is configured. Waiting %d msec before check DC status.", delaymsec)
+ time.sleep(delaymsec // 1000)
+ cnt = 0
+ output_started = 0
+ init_sleep = 0.25
+ max_sleep = 1.00
+ sleep_time = init_sleep
+ while True:
+ dc = get_dc()
+ if not dc:
+ logger.warning("DC lost during wait")
+ return False
+ cmd = "crmadmin -S %s" % dc
+ rc, s = ShellUtils().get_stdout(add_sudo(cmd))
+ if rc != 0:
+ logger.error("Exit code of command {} is {}".format(cmd, rc))
+ return False
+ if re.search("S_IDLE.*ok", s):
+ if output_started:
+ sys.stderr.write(" done\n")
+ return True
+ time.sleep(sleep_time)
+ if sleep_time < max_sleep:
+ sleep_time *= 2
+ if show_progress:
+ if not output_started:
+ output_started = 1
+ sys.stderr.write("waiting for %s to finish ." % what)
+ cnt += 1
+ if cnt % 5 == 0:
+ sys.stderr.write(".")
+
+
+def run_ptest(graph_s, nograph, scores, utilization, actions, verbosity):
+ '''
+ Pipe graph_s thru ptest(8). Show graph using dotty if requested.
+ '''
+ actions_filter = "grep LogActions: | grep -vw Leave"
+ ptest = "2>&1 %s -x -" % config.core.ptest
+ if re.search("simulate", ptest) and \
+ not re.search("-[RS]", ptest):
+ ptest = "%s -S" % ptest
+ if verbosity:
+ if actions:
+ verbosity = 'v' * max(3, len(verbosity))
+ ptest = "%s -%s" % (ptest, verbosity.upper())
+ if scores:
+ ptest = "%s -s" % ptest
+ if utilization:
+ ptest = "%s -U" % ptest
+ if config.core.dotty and not nograph:
+ fd, dotfile = mkstemp()
+ ptest = "%s -D %s" % (ptest, dotfile)
+ else:
+ dotfile = None
+ # ptest prints to stderr
+ if actions:
+ ptest = "%s | %s" % (ptest, actions_filter)
+ if options.regression_tests:
+ ptest = ">/dev/null %s" % ptest
+ logger.debug("invoke: %s", ptest)
+ rc, s = ShellUtils().get_stdout(ptest, input_s=graph_s)
+ if rc != 0:
+ logger.debug("'%s' exited with (rc=%d)", ptest, rc)
+ if actions and rc == 1:
+ logger.warning("No actions found.")
+ else:
+ logger.warning("Simulation was unsuccessful (RC=%d).", rc)
+ if dotfile:
+ if os.path.getsize(dotfile) > 0:
+ show_dot_graph(dotfile)
+ else:
+ logger.warning("ptest produced empty dot file")
+ else:
+ if not nograph:
+ logger.info("install graphviz to see a transition graph")
+ if s:
+ page_string(s)
+ return True
+
+
+def is_id_valid(ident):
+ """
+ Verify that the id follows the definition:
+ http://www.w3.org/TR/1999/REC-xml-names-19990114/#ns-qualnames
+ """
+ if not ident:
+ return False
+ id_re = r"^[A-Za-z_][\w._-]*$"
+ return re.match(id_re, ident)
+
+
+def check_range(a):
+ """
+ Verify that the integer range in list a is valid.
+ """
+ if len(a) != 2:
+ return False
+ if not isinstance(a[0], int) or not isinstance(a[1], int):
+ return False
+ return int(a[0]) <= int(a[1])
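+
+# Examples:
+#   check_range([0, 5])    -> True
+#   check_range([5, 0])    -> False
+#   check_range(["0", 5])  -> False  (strings are rejected)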
+
+
+def crm_msec(t):
+ '''
+ See lib/common/utils.c:crm_get_msec().
+ '''
+ convtab = {
+ 'ms': (1, 1),
+ 'msec': (1, 1),
+ 'us': (1, 1000),
+ 'usec': (1, 1000),
+ '': (1000, 1),
+ 's': (1000, 1),
+ 'sec': (1000, 1),
+ 'm': (60*1000, 1),
+ 'min': (60*1000, 1),
+ 'h': (60*60*1000, 1),
+ 'hr': (60*60*1000, 1),
+ }
+ if not t:
+ return -1
+ r = re.match(r"\s*(\d+)\s*([a-zA-Z]+)?", str(t))
+ if not r:
+ return -1
+ if not r.group(2):
+ q = ''
+ else:
+ q = r.group(2).lower()
+ try:
+ mult, div = convtab[q]
+ except KeyError:
+ return -1
+ return (int(r.group(1))*mult) // div
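+
+# Conversion examples (mirroring crm_get_msec semantics; a bare number is
+# interpreted as seconds):
+#   crm_msec("10")     -> 10000
+#   crm_msec("2min")   -> 120000
+#   crm_msec("500ms")  -> 500
+#   crm_msec("bad")    -> -1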
+
+
+def crm_time_cmp(a, b):
+ return crm_msec(a) - crm_msec(b)
+
+
+def shorttime(ts):
+ if isinstance(ts, datetime.datetime):
+ return ts.strftime("%X")
+ if ts is not None:
+ return time.strftime("%X", time.localtime(ts))
+ return time.strftime("%X", time.localtime(0))
+
+
+def shortdate(ts):
+ if isinstance(ts, datetime.datetime):
+ return ts.strftime("%F")
+ if ts is not None:
+ return time.strftime("%F", time.localtime(ts))
+ return time.strftime("%F", time.localtime(0))
+
+
+def file_find_by_name(root, filename):
+ 'Find a file within a tree matching filename'
+ assert root
+ assert filename
+ for dirpath, _dirnames, filenames in os.walk(root):
+ for found in fnmatch.filter(filenames, filename):
+ return os.path.join(dirpath, found)
+ return None
+
+
+def convert2ints(l):
+ """
+ Convert a list of strings (or a string) to a list of ints.
+ All strings must be ints, otherwise conversion fails and None
+ is returned!
+ """
+ try:
+ if isinstance(l, (tuple, list)):
+ return [int(x) for x in l]
+ # it's a string then
+ return int(l)
+ except ValueError:
+ return None
+
+
+def is_int(s):
+ 'Check if the string can be converted to an integer.'
+ try:
+ int(s)
+ return True
+ except ValueError:
+ return False
+
+
+def is_process(s):
+ """
+ Returns true if argument is the name of a running process.
+
+ s: process name
+ returns Boolean
+ """
+ from os.path import join, basename
+ # find pids of running processes
+ pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
+ for pid in pids:
+ try:
+ cmdline = open(join('/proc', pid, 'cmdline'), 'rb').read()
+ procname = basename(to_ascii(cmdline).replace('\x00', ' ').split(' ')[0])
+ if procname == s:
+ return True
+ except EnvironmentError:
+ # a process may have died since we got the list of pids
+ pass
+ return False
+
+
+def print_stacktrace():
+ """
+ Print the stack at the site of call
+ """
+ import traceback
+ import inspect
+ sf = inspect.currentframe().f_back.f_back
+ traceback.print_stack(sf)
+
+
+def edit_file(fname):
+ 'Edit a file.'
+ if not fname:
+ return
+ if not config.core.editor:
+ return
+ return ext_cmd_nosudo("%s %s" % (config.core.editor, fname))
+
+
+def edit_file_ext(fname, template=''):
+ '''
+ Edit a file via a temporary file.
+ Raises IOError on any error.
+ '''
+ if not os.path.isfile(fname):
+ s = template
+ else:
+ s = open(fname).read()
+ filehash = hash(s)
+ tmpfile = str2tmp(s)
+ try:
+ try:
+ if edit_file(tmpfile) != 0:
+ return
+ s = open(tmpfile, 'r').read()
+ if hash(s) == filehash: # file unchanged
+ return
+ f2 = open(fname, 'w')
+ f2.write(s)
+ f2.close()
+ finally:
+ os.unlink(tmpfile)
+ except OSError as e:
+ raise IOError(e)
+
+
+def need_pager(s, w, h):
+ from math import ceil
+ cnt = 0
+ for l in s.split('\n'):
+ # need to remove color codes
+ l = re.sub(r'\${\w+}', '', l)
+ cnt += int(ceil((len(l) + 0.5) / w))
+ if cnt >= h:
+ return True
+ return False
+
+
+def term_render(s):
+ 'Render for TERM.'
+ try:
+ return term.render(s)
+ except Exception:
+ return s
+
+
+def get_pager_cmd(*extra_opts):
+ 'returns a commandline which calls the configured pager'
+ cmdline = [config.core.pager]
+ if os.path.basename(config.core.pager) == "less":
+ cmdline.append('-R')
+ cmdline.extend(extra_opts)
+ return ' '.join(cmdline)
+
+
+def page_string(s):
+ 'Page string rendered for TERM.'
+ if not s:
+ return
+ constants.need_reset = True
+ w, h = get_winsize()
+ if not need_pager(s, w, h):
+ print(term_render(s))
+ elif not config.core.pager or not can_ask() or options.batch:
+ print(term_render(s))
+ else:
+ pipe_string(get_pager_cmd(), term_render(s).encode('utf-8'))
+ constants.need_reset = False
+
+
+def page_gen(g):
+ 'Page lines generated by generator g'
+ w, h = get_winsize()
+ if not config.core.pager or not can_ask() or options.batch:
+ for line in g:
+ sys.stdout.write(term_render(line))
+ else:
+ pipe_string(get_pager_cmd(), term_render("".join(g)))
+
+
+def page_file(filename):
+ 'Open file in pager'
+ if not os.path.isfile(filename):
+ return
+ return ext_cmd_nosudo(get_pager_cmd(filename), shell=True)
+
+
+def get_winsize():
+ try:
+ import curses
+ curses.setupterm()
+ w = curses.tigetnum('cols')
+ h = curses.tigetnum('lines')
+ except Exception:
+ try:
+ # COLS/LINES come from the environment as strings; callers do arithmetic
+ w = int(os.environ['COLS'])
+ h = int(os.environ['LINES'])
+ except (KeyError, ValueError):
+ w = 80
+ h = 25
+ return w, h
+
+
+def multicolumn(l):
+ '''
+ A ls-like representation of a list of strings.
+ A naive approach.
+ '''
+ min_gap = 2
+ w, _ = get_winsize()
+ max_len = 8
+ for s in l:
+ if len(s) > max_len:
+ max_len = len(s)
+ cols = w // (max_len + min_gap) # approx.
+ if not cols:
+ cols = 1
+ col_len = w // cols
+ for i in range(len(l) // cols + 1):
+ s = ''
+ for j in range(i * cols, (i + 1) * cols):
+ if not j < len(l):
+ break
+ if not s:
+ s = "%-*s" % (col_len, l[j])
+ elif (j + 1) % cols == 0:
+ s = "%s%s" % (s, l[j])
+ else:
+ s = "%s%-*s" % (s, col_len, l[j])
+ if s:
+ print(s)
+
+
+def cli_replace_attr(pl, name, new_val):
+ for i, attr in enumerate(pl):
+ if attr[0] == name:
+ attr[1] = new_val
+ return
+
+
+def cli_append_attr(pl, name, val):
+ pl.append([name, val])
+
+
+def lines2cli(s):
+ '''
+ Convert a string into a list of lines. Replace continuation
+ characters. Strip white space, left and right. Drop empty lines.
+ '''
+ cl = []
+ l = s.split('\n')
+ cum = []
+ for p in l:
+ p = p.strip()
+ if p.endswith('\\'):
+ p = p.rstrip('\\')
+ cum.append(p)
+ else:
+ cum.append(p)
+ cl.append(''.join(cum).strip())
+ cum = []
+ if cum: # in case s ends with backslash
+ cl.append(''.join(cum))
+ return [x for x in cl if x]
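+
+# Example: backslash-continued lines are joined and empty lines dropped:
+#   lines2cli("node a \\\n attr=1\n\nnode b")  -> ['node a attr=1', 'node b']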
+
+
+def datetime_is_aware(dt):
+ """
+ Determines if a given datetime.datetime is aware.
+
+ The logic is described in Python's docs:
+ http://docs.python.org/library/datetime.html#datetime.tzinfo
+ """
+ return dt and dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None
+
+
+def make_datetime_naive(dt):
+ """
+ Ensures that the datetime is not time zone-aware:
+
+ The returned datetime object is a naive time in UTC.
+ """
+ if dt and datetime_is_aware(dt):
+ return dt.replace(tzinfo=None) - dt.utcoffset()
+ return dt
+
+
+def total_seconds(td):
+ """
+ Backwards compatible implementation of timedelta.total_seconds()
+ """
+ if hasattr(datetime.timedelta, 'total_seconds'):
+ return td.total_seconds()
+ return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) // 10**6
+
+
+def datetime_to_timestamp(dt):
+ """
+ Convert a datetime object into a floating-point second value
+ """
+ try:
+ return total_seconds(make_datetime_naive(dt) - datetime.datetime(1970, 1, 1))
+ except Exception as e:
+ logger.error("datetime_to_timestamp error: %s", e)
+ return None
+
+
+def timestamp_to_datetime(ts):
+ """
+ Convert a timestamp into a naive datetime object
+ """
+ import dateutil
+ import dateutil.tz
+ return make_datetime_naive(datetime.datetime.fromtimestamp(ts).replace(tzinfo=dateutil.tz.tzlocal()))
+
+
+def parse_time(t, quiet=False):
+ '''
+ Try to make sense of the user provided time spec.
+ Use dateutil if available, otherwise strptime.
+ Return the datetime value.
+
+ Also does time zone elimination by passing the datetime
+ through a timestamp conversion if necessary
+
+ TODO: dateutil is very slow, avoid it if possible
+ '''
+ try:
+ from dateutil import parser, tz
+ dt = parser.parse(t)
+
+ if datetime_is_aware(dt):
+ ts = datetime_to_timestamp(dt)
+ if ts is None:
+ return None
+ dt = datetime.datetime.fromtimestamp(ts)
+ else:
+ # convert to UTC from local time
+ dt = dt - tz.tzlocal().utcoffset(dt)
+ except ValueError as msg:
+ if not quiet:
+ logger.error("parse_time %s: %s", t, msg)
+ return None
+ except ImportError as msg:
+ try:
+ tm = time.strptime(t)
+ dt = datetime.datetime(*tm[0:6])  # struct_time[6] is the weekday, not a datetime field
+ except ValueError as msg:
+ logger.error("no dateutil, please provide times as printed by date(1)")
+ return None
+ return dt
+
+
+def parse_to_timestamp(t, quiet=False):
+ '''
+ Read a string and convert it into a UNIX timestamp.
+ Added as an optimization of parse_time to avoid
+ extra conversion steps when result would be converted
+ into a timestamp anyway
+ '''
+ try:
+ from dateutil import parser, tz
+ dt = parser.parse(t)
+
+ if datetime_is_aware(dt):
+ return datetime_to_timestamp(dt)
+ # convert to UTC from local time
+ return total_seconds(dt - tz.tzlocal().utcoffset(dt) - datetime.datetime(1970, 1, 1))
+ except ValueError as msg:
+ if not quiet:
+ logger.error("parse_time %s: %s", t, msg)
+ return None
+ except ImportError as msg:
+ try:
+ tm = time.strptime(t)
+ dt = datetime.datetime(*tm[0:6])  # struct_time[6] is the weekday, not a datetime field
+ return datetime_to_timestamp(dt)
+ except ValueError as msg:
+ logger.error("no dateutil, please provide times as printed by date(1)")
+ return None
+
+
+def save_graphviz_file(ini_f, attr_d):
+ '''
+ Save graphviz settings to an ini file, if it does not exist.
+ '''
+ if os.path.isfile(ini_f):
+ logger.error("%s exists, please remove it first", ini_f)
+ return False
+ try:
+ f = open(ini_f, "wb")
+ except IOError as msg:
+ logger.error(msg)
+ return False
+ import configparser
+ p = configparser.ConfigParser()
+ for section, sect_d in attr_d.items():
+ p.add_section(section)
+ for n, v in sect_d.items():
+ p.set(section, n, v)
+ try:
+ p.write(f)
+ except IOError as msg:
+ logger.error(msg)
+ return False
+ f.close()
+ logger.info("graphviz attributes saved to %s", ini_f)
+ return True
+
+
+def load_graphviz_file(ini_f):
+ '''
+ Load graphviz ini file, if it exists.
+ '''
+ if not os.path.isfile(ini_f):
+ return True, None
+ import configparser
+ p = configparser.ConfigParser()
+ try:
+ p.read(ini_f)
+ except Exception as msg:
+ logger.error(msg)
+ return False, None
+ _graph_d = {}
+ for section in p.sections():
+ d = {}
+ for n, v in p.items(section):
+ d[n] = v
+ _graph_d[section] = d
+ return True, _graph_d
+
+
+def get_pcmk_version():
+ cmd = "/usr/sbin/pacemakerd --version"
+ out = sh.cluster_shell().get_stdout_or_raise_error(cmd)
+ version = out.split()[1]
+ logger.debug("Found pacemaker version: %s", version)
+ return version
+
+
+def get_cib_property(cib_f, attr, dflt=None):
+ """A poor man's get attribute procedure.
+ We don't want heavy parsing, this needs to be relatively
+ fast.
+ """
+ open_t = "<cluster_property_set"
+ close_t = "</cluster_property_set"
+ attr_s = 'name="%s"' % attr
+ ver_patt = re.compile('value="([^"]+)"')
+ ver = dflt # return some version in any case
+ try:
+ f = open(cib_f, "r")
+ except IOError as msg:
+ logger.error(msg)
+ return ver
+ state = 0
+ for s in f:
+ if state == 0:
+ if open_t in s:
+ state += 1
+ elif state == 1:
+ if close_t in s:
+ break
+ if attr_s in s:
+ r = ver_patt.search(s)
+ if r:
+ ver = r.group(1)
+ break
+ f.close()
+ return ver
+
+
+def get_cib_attributes(cib_f, tag, attr_l, dflt_l):
+ """A poor man's get attribute procedure.
+ We don't want heavy parsing, this needs to be relatively
+ fast.
+ """
+ open_t = "<%s " % tag
+ val_patt_l = [re.compile('%s="([^"]+)"' % x) for x in attr_l]
+ val_l = []
+ try:
+ f = open(cib_f, "rb").read()
+ except IOError as msg:
+ logger.error(msg)
+ return dflt_l
+ if os.path.splitext(cib_f)[-1] == '.bz2':
+ cib_bits = bz2.decompress(f)
+ else:
+ cib_bits = f
+ cib_s = to_ascii(cib_bits)
+ for s in cib_s.split('\n'):
+ if s.startswith(open_t):
+ i = 0
+ for patt in val_patt_l:
+ r = patt.search(s)
+ val_l.append(r and r.group(1) or dflt_l[i])
+ i += 1
+ break
+ return val_l
+
+
+def is_larger_than_min_version(version, min_version):
+ return LooseVersion(version) >= LooseVersion(min_version)
+
+
+def is_min_pcmk_ver(min_ver, cib_f=None):
+ if not constants.pcmk_version:
+ if cib_f:
+ constants.pcmk_version = get_cib_property(cib_f, "dc-version")
+ if constants.pcmk_version:
+ logger.debug("Found pacemaker version: %s in cib: %s", constants.pcmk_version, cib_f)
+ else:
+ fatal(f"Failed to get 'dc-version' from {cib_f}")
+ else:
+ constants.pcmk_version = get_pcmk_version()
+ return is_larger_than_min_version(constants.pcmk_version, min_ver)
+
+
+def is_larger_than_pcmk_118(cib_f=None):
+ return is_min_pcmk_ver("1.1.8", cib_f=cib_f)
+
+
+# quote function from python module shlex.py in python 3.3
+
+_find_unsafe = re.compile(r'[^\w@%+=:,./-]').search
+
+
+def quote(s):
+ """Return a shell-escaped version of the string *s*."""
+ if not s:
+ return "''"
+ if _find_unsafe(s) is None:
+ return s
+
+ # use single quotes, and put single quotes into double quotes
+ # the string $'b is then quoted as '$'"'"'b'
+ return "'" + s.replace("'", "'\"'\"'") + "'"
+
+
+def doublequote(s):
+ """Return a shell-escaped version of the string *s*."""
+ if not s:
+ return '""'
+ if _find_unsafe(s) is None:
+ return s
+
+ # use double quotes
+ return '"' + s.replace('"', "\\\"") + '"'
+
+
+def fetch_opts(args, opt_l):
+ '''
+ Get and remove option keywords from args.
+ They are always listed last, at the end of the line.
+ Return a list of options found. The caller can do
+ if keyw in optlist: ...
+ '''
+ re_opt = None
+ if opt_l[0].startswith("@"):
+ re_opt = re.compile("^%s$" % opt_l[0][1:])
+ del opt_l[0]
+ l = []
+ for i in reversed(list(range(len(args)))):
+ if (args[i] in opt_l) or (re_opt and re_opt.search(args[i])):
+ l.append(args.pop())
+ else:
+ break
+ return l
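+
+# Example: trailing option keywords are popped off the argument list:
+#   args = ["r1", "r2", "force"]
+#   fetch_opts(args, ["force", "dryrun"])  -> ["force"]; args is now ["r1", "r2"]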
+
+
+_LIFETIME = ["reboot", "forever"]
+_ISO8601_RE = re.compile("(PT?[0-9]|[0-9]+.*[:-])")
+
+
+def fetch_lifetime_opt(args, iso8601=True):
+ '''
+ Get and remove a lifetime option from args. It can be one of
+ lifetime_options or an ISO 8601 formatted period/time. There
+ is apparently no good support in python for this format, so
+ we cheat a bit.
+ '''
+ if args:
+ opt = args[-1]
+ if opt in _LIFETIME or (iso8601 and _ISO8601_RE.match(opt)):
+ return args.pop()
+ return None
+
+
+def list_corosync_node_names():
+ '''
+ Returns list of nodes configured
+ in corosync.conf
+ '''
+ try:
+ cfg = os.getenv('COROSYNC_MAIN_CONFIG_FILE', '/etc/corosync/corosync.conf')
+ lines = open(cfg).read().split('\n')
+ name_re = re.compile(r'\s*name:\s+(.*)')
+ names = []
+ for line in lines:
+ name = name_re.match(line)
+ if name:
+ names.append(name.group(1))
+ return names
+ except Exception:
+ return []
+
+
+def list_corosync_nodes():
+ '''
+ Returns list of nodes configured
+ in corosync.conf
+ '''
+ try:
+ cfg = os.getenv('COROSYNC_MAIN_CONFIG_FILE', '/etc/corosync/corosync.conf')
+ lines = open(cfg).read().split('\n')
+ addr_re = re.compile(r'\s*ring0_addr:\s+(.*)')
+ nodes = []
+ for line in lines:
+ addr = addr_re.match(line)
+ if addr:
+ nodes.append(addr.group(1))
+ return nodes
+ except Exception:
+ return []
+
+
+def print_cluster_nodes():
+ """
+ Print the output of crm_node -l
+ """
+ rc, out, _ = ShellUtils().get_stdout_stderr("crm_node -l")
+ if rc == 0 and out:
+ print("{}\n".format(out))
+
+
+def list_cluster_nodes(no_reg=False):
+ '''
+ Returns a list of nodes in the cluster.
+ '''
+ from . import xmlutil
+ cib = None
+ rc, out, err = ShellUtils().get_stdout_stderr(constants.CIB_QUERY, no_reg=no_reg)
+ # When cluster service running
+ if rc == 0:
+ cib = etree.fromstring(out)
+ # Static situation
+ else:
+ cib_path = os.getenv('CIB_file', constants.CIB_RAW_FILE)
+ if not os.path.isfile(cib_path):
+ return None
+ cib = xmlutil.file2cib_elem(cib_path)
+ if cib is None:
+ return None
+
+ node_list = []
+ for node in cib.xpath(constants.XML_NODE_PATH):
+ name = node.get('uname') or node.get('id')
+ if node.get('type') == 'remote':
+ srv = cib.xpath("//primitive[@id='%s']/instance_attributes/nvpair[@name='server']" % (name))
+ if srv:
+ continue
+ node_list.append(name)
+ return node_list
+
+
+def cluster_run_cmd(cmd, node_list=[]):
+ """
+ Run cmd in cluster nodes
+ """
+ nodelist = node_list or list_cluster_nodes()
+ if not nodelist:
+ raise ValueError("Failed to get node list from cluster")
+ return crmsh.parallax.parallax_call(nodelist, cmd)
+
+
+def list_cluster_nodes_except_me():
+ """
+ Get cluster node list and filter out self
+ """
+ node_list = list_cluster_nodes()
+ if not node_list:
+ raise ValueError("Failed to get node list from cluster")
+ me = this_node()
+ if me in node_list:
+ node_list.remove(me)
+ return node_list
+
+
+def service_info(name):
+ p = is_program('systemctl')
+ if p:
+ rc, outp = ShellUtils().get_stdout([p, 'show',
+ '-p', 'UnitFileState',
+ '-p', 'ActiveState',
+ '-p', 'SubState',
+ name + '.service'], shell=False)
+ if rc == 0:
+ info = []
+ for line in outp.split('\n'):
+ data = line.split('=', 1)
+ if len(data) == 2:
+ info.append(data[1].strip())
+ return '/'.join(info)
+ return None
+
+
+def running_on(resource):
+ "returns list of node names where the given resource is running"
+ rsc_locate = "crm_resource --resource '%s' --locate"
+ rc, out, err = ShellUtils().get_stdout_stderr(rsc_locate % (resource))
+ if rc != 0:
+ return []
+ nodes = []
+ head = "resource %s is running on: " % (resource)
+ for line in out.split('\n'):
+ if line.strip().startswith(head):
+ w = line[len(head):].split()
+ if w:
+ nodes.append(w[0])
+ logger.debug("%s running on: %s", resource, nodes)
+ return nodes
+
+
+# This RE matches nvpair values that can
+# be left unquoted
+_NOQUOTES_RE = re.compile(r'^[\w\.-]+$')
+
+
+def noquotes(v):
+ return _NOQUOTES_RE.match(v) is not None
+
+
+def unquote(s):
+ """
+ Reverse shell-quoting a string, so the string '"a b c"'
+ becomes 'a b c'
+ """
+ sp = shlex.split(s)
+ if sp:
+ return sp[0]
+ return ""
+
+
+def parse_sysconfig(sysconfig_file):
+ """
+ Reads a sysconfig file into a dict
+ """
+ ret = {}
+ if os.path.isfile(sysconfig_file):
+ for line in open(sysconfig_file).readlines():
+ if line.lstrip().startswith('#'):
+ continue
+ try:
+ key, val = line.split("=", 1)
+ ret[key] = unquote(val)
+ except ValueError:
+ pass
+ return ret
+
+
+def sysconfig_set(sysconfig_file, **values):
+ """
+ Set the values in the sysconfig file, updating the variables
+ if they exist already, appending them if not.
+ """
+ outp = ""
+ if os.path.isfile(sysconfig_file):
+ for line in open(sysconfig_file).readlines():
+ if line.lstrip().startswith('#'):
+ outp += line
+ else:
+ matched = False
+ try:
+ key, _ = line.split("=", 1)
+ for k, v in values.items():
+ if k == key:
+ matched = True
+ outp += '%s=%s\n' % (k, doublequote(v))
+ del values[k]
+ break
+ if not matched:
+ outp += line
+ except ValueError:
+ outp += line
+
+ for k, v in values.items():
+ outp += '%s=%s\n' % (k, doublequote(v))
+ str2file(outp, sysconfig_file)
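+
+# Round-trip sketch (the file path and contents are illustrative):
+#   a file containing SBD_DEVICE="/dev/sda1" parses as
+#     parse_sysconfig("/etc/sysconfig/sbd") -> {'SBD_DEVICE': '/dev/sda1'}
+#   and sysconfig_set("/etc/sysconfig/sbd", SBD_DEVICE="/dev/sdb1")
+#   rewrites that variable in place, appending it if missing.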
+
+
+def remote_diff_slurp(nodes, filename):
+ from . import tmpfiles
+ tmpdir = tmpfiles.create_dir()
+ return crmsh.parallax.parallax_slurp(nodes, tmpdir, filename)
+
+
+def remote_diff_this(local_path, nodes, this_node):
+ by_host = remote_diff_slurp(nodes, local_path)
+ for host, result in by_host:
+ if isinstance(result, crmsh.parallax.Error):
+ raise ValueError("Failed on %s: %s" % (host, str(result)))
+ path = result
+ _, s = ShellUtils().get_stdout("diff -U 0 -d -b --label %s --label %s %s %s" %
+ (host, this_node, path, local_path))
+ page_string(s)
+
+
+def remote_diff(local_path, nodes):
+ by_host = remote_diff_slurp(nodes, local_path)
+ for host, result in by_host:
+ if isinstance(result, crmsh.parallax.Error):
+ raise ValueError("Failed on %s: %s" % (host, str(result)))
+ h1, r1 = by_host[0]
+ h2, r2 = by_host[1]
+ _, s = ShellUtils().get_stdout("diff -U 0 -d -b --label %s --label %s %s %s" %
+ (h1, h2, r1, r2))
+ page_string(s)
+
+
+def remote_checksum(local_path, nodes, this_node):
+ import hashlib
+
+ by_host = remote_diff_slurp(nodes, local_path)
+ for host, result in by_host:
+ if isinstance(result, crmsh.parallax.Error):
+ raise ValueError(str(result))
+
+ print("%-16s SHA1 checksum of %s" % ('Host', local_path))
+ if this_node not in nodes:
+ with open(local_path, 'rb') as f:
+ print("%-16s: %s" % (this_node, hashlib.sha1(f.read()).hexdigest()))
+ for host, path in by_host:
+ with open(path, 'rb') as f:
+ print("%-16s: %s" % (host, hashlib.sha1(f.read()).hexdigest()))
+
+
+def cluster_copy_file(local_path, nodes=None, output=True):
+ """
+ Copies given file to all other cluster nodes.
+ """
+ if not nodes:
+ nodes = list_cluster_nodes_except_me()
+ rc = True
+ if not nodes:
+ return rc
+ results = prun.pcopy_to_remote(local_path, nodes, local_path)
+ for host, exc in results.items():
+ if exc is not None:
+ logger.error("Failed to copy %s to %s@%s: %s", local_path, exc.user, host, exc)
+ rc = False
+ else:
+ logger.info("%s", host)
+ logger.debug("Sync file %s to %s", local_path, host)
+ return rc
+
+
+# a set of fnmatch patterns to match attributes whose values
+# should be obscured as a sequence of **** when printed
+_obscured_nvpairs = []
+
+
+def obscured(key, value):
+ if key is not None and value is not None:
+ for o in _obscured_nvpairs:
+ if fnmatch.fnmatch(key, o):
+ return '*' * 6
+ return value
+
+
+@contextmanager
+def obscure(obscure_list):
+ global _obscured_nvpairs
+ prev = _obscured_nvpairs
+ _obscured_nvpairs = obscure_list
+ try:
+ yield
+ finally:
+ _obscured_nvpairs = prev
+
+
+def gen_nodeid_from_ipv6(addr):
+ return int(ipaddress.ip_address(addr)) % 1000000000
+
+
+def _cloud_metadata_request(uri, headers={}):
+ try:
+ import urllib2 as urllib
+ except ImportError:
+ import urllib.request as urllib
+ req = urllib.Request(uri)
+ for header, value in headers.items():
+ req.add_header(header, value)
+ try:
+ resp = urllib.urlopen(req, timeout=5)
+ content = resp.read()
+ if not isinstance(content, str):
+ return content.decode('utf-8').strip()
+ return content.strip()
+ except urllib.URLError:
+ return None
+
+
+def detect_aws():
+ """
+ Detect if in AWS
+ """
+ shell = sh.cluster_shell()
+ # will match on xen instances
+ xen_test = shell.get_stdout_or_raise_error("dmidecode -s system-version").lower()
+ # will match on nitro/kvm instances
+ kvm_test = shell.get_stdout_or_raise_error("dmidecode -s system-manufacturer").lower()
+ if "amazon" in xen_test or "amazon" in kvm_test:
+ return True
+ return False
+
+
+def detect_azure():
+ """
+ Detect if in Azure
+ """
+ # Should check both system-manufacturer and chassis-asset-tag
+ # In some azure environment, dmidecode -s system-manufacturer
+ # might return American Megatrends Inc. instead of Microsoft Corporation in Azure.
+ # The better way is to check the result of dmidecode -s chassis-asset-tag is
+ # 7783-7084-3265-9085-8269-3286-77, aka. the ascii code of MSFT AZURE VM
+ shell = sh.cluster_shell()
+ system_manufacturer = shell.get_stdout_or_raise_error("dmidecode -s system-manufacturer")
+ chassis_asset_tag = shell.get_stdout_or_raise_error("dmidecode -s chassis-asset-tag")
+ if "microsoft corporation" in system_manufacturer.lower() or \
+ ''.join([chr(int(n)) for n in re.findall(r"\d\d", chassis_asset_tag)]) == "MSFT AZURE VM":
+ # To detect azure we also need to make an API request
+ result = _cloud_metadata_request(
+ "http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/privateIpAddress?api-version=2017-08-01&format=text",
+ headers={"Metadata": "true"})
+ if result:
+ return True
+ return False
+
+
+def detect_gcp():
+ """
+ Detect if in GCP
+ """
+ bios_vendor = sh.cluster_shell().get_stdout_or_raise_error("dmidecode -s bios-vendor")
+ if "Google" in bios_vendor:
+ # To detect GCP we also need to make an API request
+ result = _cloud_metadata_request(
+ "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip",
+ headers={"Metadata-Flavor": "Google"})
+ if result:
+ return True
+ return False
+
+
+@memoize
+def detect_cloud():
+ """
+ Tries to determine which (if any) cloud environment
+ the cluster is running on.
+
+ This is mainly done using dmidecode.
+
+ If the host cannot be determined, this function
+ returns None. Otherwise, it returns a string
+ identifying the platform.
+
+ These are the currently known platforms:
+
+ * amazon-web-services
+ * microsoft-azure
+ * google-cloud-platform
+
+ """
+ if not is_program("dmidecode"):
+ return None
+ aws = detect_aws()
+ if aws:
+ return constants.CLOUD_AWS
+ azure = detect_azure()
+ if azure:
+ return constants.CLOUD_AZURE
+ gcp = detect_gcp()
+ if gcp:
+ return constants.CLOUD_GCP
+ return None
+
+
+def debug_timestamp():
+ return datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
+
+
+def check_ssh_passwd_need(local_user, remote_user, host):
+ """
+ Check whether access to host need password
+ """
+ ssh_options = "-o StrictHostKeyChecking=no -o EscapeChar=none -o ConnectTimeout=15"
+ ssh_cmd = "{} ssh {} -T -o Batchmode=yes {}@{} true".format(get_ssh_agent_str(), ssh_options, remote_user, host)
+ rc, _ = sh.LocalShell().get_rc_and_error(local_user, ssh_cmd)
+ return rc != 0
+
+
+def get_ssh_agent_str():
+ ssh_agent_str = ""
+ if crmsh.user_of_host.instance().use_ssh_agent():
+ ssh_agent_str = f"SSH_AUTH_SOCK={os.environ.get('SSH_AUTH_SOCK')}"
+ return ssh_agent_str
+
+
+def check_port_open(ip, port):
+ import socket
+
+ family = socket.AF_INET6 if IP.is_ipv6(ip) else socket.AF_INET
+ with closing(socket.socket(family, socket.SOCK_STREAM)) as sock:
+ return sock.connect_ex((ip, port)) == 0
+
+
+def valid_port(port):
+ return int(port) >= 1024 and int(port) <= 65535
+
+
+def is_qdevice_configured():
+ from . import corosync
+ return corosync.get_value("quorum.device.model") == "net"
+
+
+def is_qdevice_tls_on():
+ from . import corosync
+ return corosync.get_value("quorum.device.net.tls") == "on"
+
+
+def get_nodeinfo_from_cmaptool():
+ nodeid_ip_dict = {}
+ rc, out = ShellUtils().get_stdout("corosync-cmapctl -b runtime.totem.pg.mrp.srp.members")
+ if rc != 0:
+ return nodeid_ip_dict
+
+ for line in out.split('\n'):
+ match = re.search(r'members\.(.*)\.ip', line)
+ if match:
+ node_id = match.group(1)
+ iplist = re.findall(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', line)
+ nodeid_ip_dict[node_id] = iplist
+ return nodeid_ip_dict
+
+
+def get_iplist_from_name(name):
+ """
+ Given node host name, return this host's ip list in corosync cmap
+ """
+ ip_list = []
+ nodeid = get_nodeid_from_name(name)
+ if not nodeid:
+ return ip_list
+ nodeinfo = get_nodeinfo_from_cmaptool()
+ return nodeinfo.get(nodeid, ip_list)
+
+
+def valid_nodeid(nodeid):
+ if not ServiceManager().service_is_active('corosync.service'):
+ return False
+
+ for _id, _ in get_nodeinfo_from_cmaptool().items():
+ if _id == nodeid:
+ return True
+ return False
+
+
+def get_nodeid_from_name(name):
+ rc, out = ShellUtils().get_stdout('crm_node -l')
+ if rc != 0:
+ return None
+ res = re.search(r'^([0-9]+) {} '.format(name), out, re.M)
+ if res:
+ return res.group(1)
+ else:
+ return None
+
+
+def check_empty_option_value(options):
+ if not isinstance(options, argparse.Namespace):
+ raise ValueError("Expected type of \"options\" is \"argparse.Namespace\", not \"{}\"".format(type(options)))
+
+ for opt in vars(options):
+ value = getattr(options, opt)
+ if isinstance(value, str) and len(value.strip()) == 0:
+ raise ValueError("Empty value not allowed for dest \"{}\"".format(opt))
+
+
+def interface_choice():
+ _, out = ShellUtils().get_stdout("ip a")
+ # should consider interface format like "ethx@xxx"
+ interface_list = re.findall(r'(?:[0-9]+:) (.*?)(?=: |@.*?: )', out)
+ return [nic for nic in interface_list if nic != "lo"]
+
+
+class IP(object):
+ """
+ Class to get some properties of IP address
+ """
+
+ def __init__(self, addr):
+ """
+ Init function
+ """
+ self.addr = addr
+
+ @property
+ def ip_address(self):
+ """
+ Create ipaddress instance
+ """
+ return ipaddress.ip_address(self.addr)
+
+ @property
+ def version(self):
+ """
+ Get IP address version
+ """
+ return self.ip_address.version
+
+ @classmethod
+ def is_mcast(cls, addr):
+ """
+ Check whether the address is multicast address
+ """
+ cls_inst = cls(addr)
+ return cls_inst.ip_address.is_multicast
+
+ @classmethod
+ def is_ipv6(cls, addr):
+ """
+ Check whether the address is IPV6 address
+ """
+ return cls(addr).version == 6
+
+ @property
+ def is_loopback(self):
+ """
+ Check whether the address is loopback address
+ """
+ return self.ip_address.is_loopback
+
+
+class Interface(IP):
+ """
+ Class to get information from one interface
+ """
+
+ def __init__(self, ip_with_mask):
+ """
+ Init function
+ """
+ self.ip, self.mask = ip_with_mask.split('/')
+ super(__class__, self).__init__(self.ip)
+
+ @property
+ def ip_with_mask(self):
+ """
+ Get ip with netmask
+ """
+ return '{}/{}'.format(self.ip, self.mask)
+
+ @property
+ def ip_interface(self):
+ """
+ Create ip_interface instance
+ """
+ return ipaddress.ip_interface(self.ip_with_mask)
+
+ @property
+ def network(self):
+ """
+ Get network address
+ """
+ return str(self.ip_interface.network.network_address)
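+
+# Example:
+#   Interface("192.168.122.241/24").network  -> "192.168.122.0"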
+
+
+class InterfacesInfo(object):
+ """
+ Class to collect interfaces information on local node
+ """
+
+ def __init__(self, ipv6=False, second_heartbeat=False, custom_nic_list=[]):
+ """
+ Init function
+
+ On init process,
+ "ipv6" is provided by -I option
+ "second_heartbeat" is provided by -M option
+ "custom_nic_list" is provided by -i option
+ """
+ self.ip_version = 6 if ipv6 else 4
+ self.second_heartbeat = second_heartbeat
+ self._default_nic_list = custom_nic_list
+ self._nic_info_dict = {}
+
+ def get_interfaces_info(self):
+ """
+ Try to get interfaces info dictionary via "ip" command
+
+ IMPORTANT: this is the method that populates the data; it must be called after initialization
+ """
+ cmd = "ip -{} -o addr show".format(self.ip_version)
+ rc, out, err = ShellUtils().get_stdout_stderr(cmd)
+ if rc != 0:
+ raise ValueError(err)
+
+ # each line of the output looks like:
+ # 2: enp1s0 inet 192.168.122.241/24 brd 192.168.122.255 scope global enp1s0\ valid_lft forever preferred_lft forever
+ for line in out.splitlines():
+ _, nic, _, ip_with_mask, *_ = line.split()
+ # might be a tun interface without a netmask
+ if '/' not in ip_with_mask:
+ continue
+ interface_inst = Interface(ip_with_mask)
+ if interface_inst.is_loopback:
+ continue
+ # one nic might be configured with multiple IP addresses
+ if nic not in self._nic_info_dict:
+ self._nic_info_dict[nic] = []
+ self._nic_info_dict[nic].append(interface_inst)
+
+ if not self._nic_info_dict:
+ raise ValueError("No address configured")
+ if self.second_heartbeat and len(self._nic_info_dict) == 1:
+ raise ValueError("Cannot configure second heartbeat, since only one address is available")
+
+ @property
+ def nic_list(self):
+ """
+ Get interfaces name list
+ """
+ return list(self._nic_info_dict.keys())
+
+ @property
+ def interface_list(self):
+ """
+ Get instance list of class Interface
+ """
+ _interface_list = []
+ for interface in self._nic_info_dict.values():
+ _interface_list.extend(interface)
+ return _interface_list
+
+ @property
+ def ip_list(self):
+ """
+ Get IP address list
+ """
+ return [interface.ip for interface in self.interface_list]
+
+ @classmethod
+ def get_local_ip_list(cls, is_ipv6):
+ """
+ Get IP address list
+ """
+ cls_inst = cls(is_ipv6)
+ cls_inst.get_interfaces_info()
+ return cls_inst.ip_list
+
+ @classmethod
+ def ip_in_local(cls, addr):
+ """
+ Check whether the given address is one of the local addresses
+ """
+ cls_inst = cls(IP.is_ipv6(addr))
+ cls_inst.get_interfaces_info()
+ return addr in cls_inst.ip_list
+
+ @property
+ def network_list(self):
+ """
+ Get network list
+ """
+ return list(set([interface.network for interface in self.interface_list]))
+
+ def _nic_first_ip(self, nic):
+ """
+ Get the first IP of specific nic
+ """
+ return self._nic_info_dict[nic][0].ip
+
+ def get_default_nic_list_from_route(self):
+ """
+ Get default nic list from route
+ """
+ if self._default_nic_list:
+ return self._default_nic_list
+
+ #TODO what if user only has ipv6 route?
+ cmd = "ip -o route show"
+ rc, out, err = ShellUtils().get_stdout_stderr(cmd)
+ if rc != 0:
+ raise ValueError(err)
+ res = re.search(r'^default via .* dev (.*?) ', out)
+ if res:
+ self._default_nic_list = [res.group(1)]
+ else:
+ if not self.nic_list:
+ self.get_interfaces_info()
+ logger.warning("No default route configured. Using the first found nic")
+ self._default_nic_list = [self.nic_list[0]]
+ return self._default_nic_list
+
+ def get_default_ip_list(self):
+ """
+ Get default IP list will be used by corosync
+ """
+ if not self._default_nic_list:
+ self.get_default_nic_list_from_route()
+ if not self.nic_list:
+ self.get_interfaces_info()
+
+ _ip_list = []
+ for nic in self._default_nic_list:
+ # in case the given interface does not exist
+ if nic not in self.nic_list:
+ raise ValueError("Failed to detect IP address for {}".format(nic))
+ _ip_list.append(self._nic_first_ip(nic))
+ # in case -M was specified but only one interface was given via -i
+ if self.second_heartbeat and len(self._default_nic_list) == 1:
+ for nic in self.nic_list:
+ if nic not in self._default_nic_list:
+ _ip_list.append(self._nic_first_ip(nic))
+ break
+ return _ip_list
+
+
+def check_file_content_included(source_file, target_file, remote=None, source_local=False):
+ """
+ Check whether target_file includes contents of source_file
+ """
+ if not detect_file(source_file, remote=None if source_local else remote):
+ raise ValueError("File {} not exist".format(source_file))
+ if not detect_file(target_file, remote=remote):
+ return False
+
+ shell = sh.cluster_shell()
+ cmd = "cat {}".format(target_file)
+ target_data = shell.get_stdout_or_raise_error(cmd, host=remote)
+ cmd = "cat {}".format(source_file)
+ source_data = shell.get_stdout_or_raise_error(cmd, host=None if source_local else remote)
+ return source_data in target_data
+
+def check_text_included(text, target_file, remote=None):
+ "Check whether target_file includes the text"
+ if not detect_file(target_file, remote=remote):
+ return False
+
+ cmd = "cat {}".format(target_file)
+ target_data = sh.cluster_shell().get_stdout_or_raise_error(cmd, remote)
+ return text in target_data
+
+
+def package_is_installed(pkg, remote_addr=None):
+ """
+ Check if package is installed
+ """
+ cmd = "rpm -q --quiet {}".format(pkg)
+ if remote_addr:
+ # check on remote
+ rc, _, _ = sh.cluster_shell().get_rc_stdout_stderr_without_input(remote_addr, cmd)
+ else:
+ # check on local
+ rc, _ = ShellUtils().get_stdout(cmd)
+ return rc == 0
+
+
+def ping_node(node):
+ """
+ Check if the remote node is reachable
+ """
+ rc, _, err = ShellUtils().get_stdout_stderr("ping -c 1 {}".format(node))
+ if rc != 0:
+ raise ValueError("host \"{}\" is unreachable: {}".format(node, err))
+
+
+def calculate_quorate_status(expected_votes, actual_votes):
+ """
+ Given expected votes and actual votes, calculate whether the cluster is quorate
+ """
+ return int(actual_votes)/int(expected_votes) > 0.5
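+
+# Examples: quorum requires strictly more than half of the expected votes:
+#   calculate_quorate_status(3, 2)  -> True
+#   calculate_quorate_status(2, 1)  -> False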
+
+
+def get_quorum_votes_dict(remote=None):
+ """
+ Return a dictionary containing expected votes and total votes
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("corosync-quorumtool -s", remote, success_exit_status={0, 2})
+ return dict(re.findall("(Expected|Total) votes:\s+(\d+)", out))
+
+
+def check_all_nodes_reachable():
+ """
+ Check if all cluster nodes are reachable
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("crm_node -l")
+ for node in re.findall("\d+ (.*) \w+", out):
+ ping_node(node)
+
+
+def re_split_string(reg, string):
+ """
+ Split a string by a regex, filtering out empty items
+ """
+ return [x for x in re.split(reg, string) if x]
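+
+# Example:
+#   re_split_string(r'[ ;]', "a;;b c")  -> ['a', 'b', 'c']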
+
+
+def is_block_device(dev):
+ """
+ Check if dev is a block device
+ """
+ try:
+ rc = S_ISBLK(os.stat(dev).st_mode)
+ except OSError:
+ return False
+ return rc
+
+
+def has_stonith_running():
+ """
+ Check if any stonith device registered
+ """
+ from . import sbd
+ out = sh.cluster_shell().get_stdout_or_raise_error("stonith_admin -L")
+ has_stonith_device = re.search("[1-9]+ fence device[s]* found", out) is not None
+ using_diskless_sbd = sbd.SBDManager.is_using_diskless_sbd()
+ return has_stonith_device or using_diskless_sbd
+
+
+def has_disk_mounted(dev):
+ """
+ Check if device already mounted
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("mount")
+ return re.search("\n{} on ".format(dev), out) is not None
+
+
+def has_mount_point_used(directory):
+ """
+ Check if the mount point is already in use
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("mount")
+ return re.search(" on {}".format(directory), out) is not None
+
+
+def all_exist_id():
+ """
+ Get the list of currently existing ids
+ """
+ from .cibconfig import cib_factory
+ cib_factory.refresh()
+ return cib_factory.id_list()
+
+
+def randomword(length=6):
+ """
+ Generate a random lowercase word
+ """
+ letters = string.ascii_lowercase
+ return ''.join(random.choice(letters) for i in range(length))
+
+
+def gen_unused_id(exist_id_list, prefix="", length=6):
+ """
+ Generate an unused id
+ """
+ unused_id = prefix or randomword(length)
+ while unused_id in exist_id_list:
+ unused_id = re.sub("$", "-{}".format(randomword(length)), unused_id)
+ return unused_id
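+
+
+# Example of the collision handling above (the random suffix is illustrative):
+#
+#   gen_unused_id(["web", "db"], prefix="web")  # -> 'web-kqzenf'
+#   gen_unused_id(["web", "db"], prefix="ip")   # -> 'ip', no collision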
+
+
+def get_all_vg_name():
+ """
+ Get all available VGs
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("vgdisplay")
+ return re.findall("VG Name\s+(.*)", out)
+
+
+def get_pe_number(vg_id):
+ """
+ Get the total PE number of the given VG
+ """
+ output = sh.cluster_shell().get_stdout_or_raise_error("vgdisplay {}".format(vg_id))
+ res = re.search("Total PE\s+(\d+)", output)
+ if not res:
+ raise ValueError("Cannot find PE on VG({})".format(vg_id))
+ return int(res.group(1))
+
+
+def has_dev_partitioned(dev, peer=None):
+ """
+ Check if device has partitions
+ """
+ return len(get_dev_info(dev, "NAME", peer=peer).splitlines()) > 1
+
+
+def get_dev_uuid(dev, peer=None):
+ """
+ Get UUID of device on local or peer node
+ """
+ out = get_dev_info(dev, "UUID", peer=peer).splitlines()
+ return out[0] if out else get_dev_uuid_2(dev, peer)
+
+
+def get_dev_uuid_2(dev, peer=None):
+ """
+ Get UUID of device using blkid
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("blkid {}".format(dev), peer)
+ res = re.search("UUID=\"(.*?)\"", out)
+ return res.group(1) if res else None
+
+
+def get_dev_fs_type(dev, peer=None):
+ """
+ Get filesystem type of device
+ """
+ return get_dev_info(dev, "FSTYPE", peer=peer)
+
+
+def get_dev_info(dev, *_type, peer=None):
+ """
+ Get device info using lsblk
+ """
+ cmd = "lsblk -fno {} {}".format(','.join(_type), dev)
+ return sh.cluster_shell().get_stdout_or_raise_error(cmd, peer)
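+
+
+# The helpers above all funnel into a single lsblk call; for instance
+# (device name is illustrative):
+#
+#   get_dev_info("/dev/sda", "NAME", "FSTYPE")
+#   # runs: lsblk -fno NAME,FSTYPE /dev/sda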
+
+
+def is_dev_used_for_lvm(dev, peer=None):
+ """
+ Check if the device is used by LVM
+ """
+ return "lvm" in get_dev_info(dev, "TYPE", peer=peer)
+
+
+def is_dev_a_plain_raw_disk_or_partition(dev, peer=None):
+ """
+ Check if device is a raw disk or partition
+ """
+ out = get_dev_info(dev, "TYPE", peer=peer)
+ return re.search("(disk|part)", out) is not None
+
+
+def compare_uuid_with_peer_dev(dev_list, peer):
+ """
+ Check that each device's UUID matches the one on the peer node
+ """
+ for dev in dev_list:
+ local_uuid = get_dev_uuid(dev)
+ if not local_uuid:
+ raise ValueError("Cannot find UUID for {} on local".format(dev))
+ peer_uuid = get_dev_uuid(dev, peer)
+ if not peer_uuid:
+ raise ValueError("Cannot find UUID for {} on {}".format(dev, peer))
+ if local_uuid != peer_uuid:
+ raise ValueError("UUID of {} not same with peer {}".format(dev, peer))
+
+
+def append_res_to_group(group_id, res_id):
+ """
+ Append a resource to an existing group
+ """
+ cmd = "crm configure modgroup {} add {}".format(group_id, res_id)
+ sh.cluster_shell().get_stdout_or_raise_error(cmd)
+
+
+def get_qdevice_sync_timeout():
+ """
+ Get qdevice sync_timeout
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("crm corosync status qdevice")
+ res = re.search("Sync HB interval:\s+(\d+)ms", out)
+ if not res:
+ raise ValueError("Cannot find qdevice sync timeout")
+ return int(int(res.group(1))/1000)
+
+
+def detect_virt():
+ """
+ Detect if running in a virtualized environment
+ """
+ rc, _, _ = ShellUtils().get_stdout_stderr("systemd-detect-virt")
+ return rc == 0
+
+
+def fatal(error_msg):
+ """
+ Raise exception to jump over this module,
+ handled by Context.run in ui_context.py
+ """
+ raise ValueError(error_msg)
+
+
+def is_standby(node):
+ """
+ Check if the node is already in standby mode
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("crm_mon -1")
+ return re.search(r'Node\s+{}:\s+standby'.format(node), out) is not None
+
+
+def get_dlm_option_dict(peer=None):
+ """
+ Get the DLM config options as a dictionary
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("dlm_tool dump_config", peer)
+ return dict(re.findall("(\w+)=(\w+)", out))
+
+
+def set_dlm_option(peer=None, **kargs):
+ """
+ Set dlm option
+ """
+ shell = sh.cluster_shell()
+ dlm_option_dict = get_dlm_option_dict(peer=peer)
+ for option, value in kargs.items():
+ if option not in dlm_option_dict:
+ raise ValueError(f'"{option}" is not dlm config option')
+ if dlm_option_dict[option] != value:
+ shell.get_stdout_or_raise_error(f'dlm_tool set_config "{option}={value}"', peer)
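+
+
+# Usage sketch (the option name is illustrative; valid keys are whatever
+# `dlm_tool dump_config` reports on the system):
+#
+#   set_dlm_option(log_debug=1)
+#   # validates "log_debug" against dump_config, then runs
+#   # dlm_tool set_config "log_debug=1" only if the value differs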
+
+
+def is_dlm_running(peer=None):
+ """
+ Check if the DLM controld resource is running
+ """
+ return is_resource_running(constants.DLM_CONTROLD_RA, peer=peer)
+
+
+def has_resource_configured(ra_type, peer=None):
+ """
+ Check if the RA is configured
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("crm_mon -1rR", peer)
+ return re.search(ra_type, out) is not None
+
+
+def is_resource_running(ra_type, peer=None):
+ """
+ Check if the RA is running
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("crm_mon -1rR", peer)
+ patt = f"\({ra_type}\):\s*Started"
+ return re.search(patt, out) is not None
+
+
+def is_dlm_configured(peer=None):
+ """
+ Check if DLM is configured
+ """
+ return has_resource_configured(constants.DLM_CONTROLD_RA, peer=peer)
+
+
+def is_quorate(peer=None):
+ """
+ Check if the cluster is quorate
+ """
+ out = sh.cluster_shell().get_stdout_or_raise_error("corosync-quorumtool -s", peer, success_exit_status={0, 2})
+ res = re.search(r'Quorate:\s+(.*)', out)
+ if res:
+ return res.group(1) == "Yes"
+ else:
+ raise ValueError("Failed to get quorate status from corosync-quorumtool")
+
+
+def is_2node_cluster_without_qdevice():
+ """
+ Check if current cluster has two nodes without qdevice
+ """
+ current_num = len(list_cluster_nodes())
+ qdevice_num = 1 if is_qdevice_configured() else 0
+ return (current_num + qdevice_num) == 2
+
+
+def get_pcmk_delay_max(two_node_without_qdevice=False):
+ """
+ Get value of pcmk_delay_max
+ """
+ if ServiceManager().service_is_active("pacemaker.service") and two_node_without_qdevice:
+ return constants.PCMK_DELAY_MAX
+ return 0
+
+
+def get_property(name, property_type="crm_config", peer=None):
+ """
+ Get cluster properties
+
+ "property_type" can be crm_config|rsc_defaults|op_defaults
+ """
+ if property_type == "crm_config":
+ cib_path = os.getenv('CIB_file', constants.CIB_RAW_FILE)
+ cmd = "CIB_file={} sudo --preserve-env=CIB_file crm configure get_property {}".format(cib_path, name)
+ else:
+ cmd = "sudo crm_attribute -t {} -n {} -Gq".format(property_type, name)
+ rc, stdout, _ = sh.cluster_shell().get_rc_stdout_stderr_without_input(peer, cmd)
+ return stdout if rc == 0 else None
+
+
+def check_no_quorum_policy_with_dlm():
+ """
+ Warn when no-quorum-policy is not set to "freeze" while DLM is configured
+ """
+ if not is_dlm_configured():
+ return
+ res = get_property("no-quorum-policy")
+ if res != "freeze":
+ logger.warning("The DLM cluster best practice suggests to set the cluster property \"no-quorum-policy=freeze\"")
+
+
+def set_property(property_name, property_value, property_type="crm_config", conditional=False):
+ """
+ Set a property for cluster, resource defaults or op defaults
+
+ "property_type" can be crm_config|rsc_defaults|op_defaults
+ When "conditional" is True, set the property if given "property_value" is larger then value from cib
+ """
+ origin_value = get_property(property_name, property_type)
+ if origin_value and str(origin_value) == str(property_value):
+ return
+ if conditional:
+ if crm_msec(origin_value) >= crm_msec(property_value):
+ return
+ if origin_value and str(origin_value) != str(property_value):
+ logger.warning("\"{}\" in {} is set to {}, it was {}".format(property_name, property_type, property_value, origin_value))
+ property_sub_cmd = "property" if property_type == "crm_config" else property_type
+ cmd = "crm configure {} {}={}".format(property_sub_cmd, property_name, property_value)
+ sh.cluster_shell().get_stdout_or_raise_error(cmd)
+
+
+def get_systemd_timeout_start_in_sec(time_res):
+ """
+ Get the TimeoutStartUSec value in second unit
+ The original format looks like: 1min 30s
+ """
+ res_seconds = re.search(r"(\d+)s", time_res)
+ start_timeout = int(res_seconds.group(1)) if res_seconds else 0
+ res_min = re.search(r"(\d+)min", time_res)
+ start_timeout += 60 * int(res_min.group(1)) if res_min else 0
+ return start_timeout
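+
+
+# Worked examples of the conversion above:
+#
+#   get_systemd_timeout_start_in_sec("1min 30s")  # -> 90
+#   get_systemd_timeout_start_in_sec("45s")       # -> 45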
+
+
+def is_ocf_1_1_cib_schema_detected():
+ """
+ Only turn on the ocf_1_1 feature if the cib schema version is pacemaker-3.7 or above
+ """
+ from .cibconfig import cib_factory
+ cib_factory.get_cib()
+ return is_larger_than_min_version(cib_factory.get_schema(), constants.SCHEMA_MIN_VER_SUPPORT_OCF_1_1)
+
+
+def compatible_role(role1, role2):
+ master_or_promoted = (constants.RSC_ROLE_PROMOTED_LEGACY, constants.RSC_ROLE_PROMOTED)
+ slave_or_unpromoted = (constants.RSC_ROLE_UNPROMOTED_LEGACY, constants.RSC_ROLE_UNPROMOTED)
+ res1 = role1 in master_or_promoted and role2 in master_or_promoted
+ res2 = role1 in slave_or_unpromoted and role2 in slave_or_unpromoted
+ return res1 or res2
+
+
+auto_convert_role = True
+
+
+def handle_role_for_ocf_1_1(value, name='role'):
+ """
+ * Convert role from Promoted/Unpromoted to Master/Slave if schema doesn't support OCF 1.1
+ * Convert role from Master/Slave to Promoted/Unpromoted if ocf1.1 cib schema detected and OCF_1_1_SUPPORT is yes
+ """
+ role_names = ["role", "target-role"]
+ downgrade_dict = {
+ constants.RSC_ROLE_PROMOTED: constants.RSC_ROLE_PROMOTED_LEGACY,
+ constants.RSC_ROLE_UNPROMOTED: constants.RSC_ROLE_UNPROMOTED_LEGACY
+ }
+ upgrade_dict = {v: k for k, v in downgrade_dict.items()}
+
+ if name not in role_names:
+ return value
+ if value in downgrade_dict and not is_ocf_1_1_cib_schema_detected():
+ logger.warning('Convert "%s" to "%s" since the current schema version is old and not upgraded yet. Please consider "%s"', value, downgrade_dict[value], constants.CIB_UPGRADE)
+ return downgrade_dict[value]
+ if value in upgrade_dict and is_ocf_1_1_cib_schema_detected() and config.core.OCF_1_1_SUPPORT and auto_convert_role:
+ logger.info('Convert deprecated "%s" to "%s"', value, upgrade_dict[value])
+ return upgrade_dict[value]
+
+ return value
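+
+
+# Behaviour sketch, assuming an old CIB schema without OCF 1.1 support:
+#
+#   handle_role_for_ocf_1_1("Promoted")              # -> "Master", plus a
+#                                                    # schema-upgrade warning
+#   handle_role_for_ocf_1_1("Promoted", name="foo")  # -> "Promoted", untouched:
+#                                                    # "foo" is not a role name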
+
+
+def diff_and_patch(orig_cib_str, current_cib_str):
+ """
+ Use crm_diff to generate patch, then apply
+ """
+ # In cibconfig.py, the _patch_cib method doesn't include the status section,
+ # so a common helper is needed here to handle the general case
+ from . import tmpfiles
+ orig_cib_file = str2tmp(orig_cib_str, suffix=".xml")
+ current_cib_file = str2tmp(current_cib_str, suffix=".xml")
+ tmpfiles.add(orig_cib_file)
+ tmpfiles.add(current_cib_file)
+
+ cmd = "crm_diff -u -o '{}' -n '{}'".format(orig_cib_file, current_cib_file)
+ rc, cib_diff, err = ShellUtils().get_stdout_stderr(cmd)
+ if rc == 0: # no difference
+ return True
+ if err:
+ logger.error("Failed to run crm_diff: %s", err)
+ return False
+ logger.debug("Diff: %s", cib_diff)
+ rc = pipe_string("cibadmin -p -P --force", cib_diff)
+ if rc != 0:
+ logger.error("Failed to patch")
+ return False
+ return True
+
+
+def detect_file(_file, remote=None):
+ """
+ Detect if a file exists, supporting both local and remote
+ """
+ rc = False
+ if not remote:
+ cmd = "test -f {}".format(_file)
+ else:
+ # FIXME
+ cmd = "ssh {} {}@{} 'test -f {}'".format(SSH_OPTION, user_of(remote), remote, _file)
+ code, _, _ = ShellUtils().get_stdout_stderr(cmd)
+ rc = code == 0
+ return rc
+
+
+def check_function_with_timeout(check_function, wait_timeout=30, interval=1, *args, **kwargs):
+ """
+ Run check_function in a loop
+ Return once check_function returns True
+ Raise TimeoutError when the timeout is reached
+ """
+ current_time = int(time.time())
+ timeout = current_time + wait_timeout
+ while current_time <= timeout:
+ if check_function(*args, **kwargs):
+ return
+ time.sleep(interval)
+ current_time = int(time.time())
+ raise TimeoutError
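+
+
+# Usage sketch: poll a predicate until it holds or the timeout elapses
+# (the predicate chosen here is just for illustration):
+#
+#   check_function_with_timeout(is_quorate, wait_timeout=30, interval=1)
+#   # returns silently once is_quorate() is True, else raises TimeoutError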
+
+
+def fetch_cluster_node_list_from_node(init_node):
+ """
+ Fetch cluster member list from one known cluster node
+ """
+ cluster_nodes_list = []
+ out = sh.cluster_shell().get_stdout_or_raise_error("crm_node -l", init_node)
+ for line in out.splitlines():
+ # Parse line in format: <id> <nodename> <state>, and collect the nodename.
+ tokens = line.split()
+ if len(tokens) == 0:
+ pass # Skip any spurious empty line.
+ elif len(tokens) < 3:
+ logger.warning("The node '%s' has no known name and/or state information", tokens[0])
+ elif tokens[2] != "member":
+ logger.warning("The node '%s'(state '%s') is not a current member", tokens[1], tokens[2])
+ else:
+ cluster_nodes_list.append(tokens[1])
+ return cluster_nodes_list
+
+
+def has_sudo_access():
+ """
+ Check if current user has sudo access
+ """
+ rc, _, _ = ShellUtils().get_stdout_stderr("sudo -S -k -n id -u")
+ return rc == 0
+
+
+def in_haclient():
+ """
+ Check if current user is in haclient group
+ """
+ return constants.HA_GROUP in [grp.getgrgid(g).gr_name for g in os.getgroups()]
+
+
+def check_user_access(level_name):
+ """
+ Check current user's privilege and give hints to user
+ """
+ current_user = userdir.getuser()
+ if current_user == "root":
+ return
+ if level_name != "cluster" and in_haclient():
+ return
+
+ if not has_sudo_access():
+ if level_name == "cluster":
+ hints = f"""Please run this command starting with "sudo".
+Currently, this command needs to use sudo to escalate itself as root.
+Please consider to add "{current_user}" as sudoer. For example:
+ sudo bash -c 'echo "{current_user} ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/{current_user}'"""
+ else:
+ hints = f"""This command needs higher privilege.
+Option 1) Please consider adding "{current_user}" as a sudoer. For example:
+ sudo bash -c 'echo "{current_user} ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/{current_user}'
+Option 2) Add "{current_user}" to the haclient group. For example:
+ sudo usermod -aG haclient {current_user}"""
+ logger.error(hints)
+ else:
+ logger.error("Please run this command starting with \"sudo\"")
+ raise TerminateSubCommand
+
+
+class HostUserConfig:
+ """Keep the username used for ssh connection corresponding to each host.
+
+ The data is saved in configuration option `core.hosts`.
+ """
+ def __init__(self):
+ self._hosts_users = dict()
+ self._no_generating_ssh_key = False
+ self.load()
+
+ def load(self):
+ self._load_hosts_users()
+ self._load_no_generating_ssh_key()
+
+ def _load_hosts_users(self):
+ users = list()
+ hosts = list()
+ li = config.get_option('core', 'hosts')
+ if li == ['']:
+ self._hosts_users = dict()
+ return
+ for s in li:
+ parts = s.split('@', 2)
+ if len(parts) != 2:
+ raise ValueError('Malformed config core.hosts: {}'.format(s))
+ users.append(parts[0])
+ hosts.append(parts[1])
+ self._hosts_users = {host: user for user, host in zip(users, hosts)}
+
+ def _load_no_generating_ssh_key(self):
+ self._no_generating_ssh_key = config.get_option('core', 'no_generating_ssh_key')
+
+ def save_local(self):
+ value = [f'{user}@{host}' for host, user in sorted(self._hosts_users.items(), key=lambda x: x[0])]
+ config.set_option('core', 'hosts', value)
+ config.set_option('core', 'no_generating_ssh_key', self._no_generating_ssh_key)
+ debug_on = config.get_option('core', 'debug')
+ if debug_on:
+ config.set_option('core', 'debug', 'false')
+ config.save()
+ if debug_on:
+ config.set_option('core', 'debug', 'true')
+
+ def save_remote(self, remote_hosts: typing.Iterable[str]):
+ self.save_local()
+ value = [f'{user}@{host}' for host, user in sorted(self._hosts_users.items(), key=lambda x: x[0])]
+ crmsh.parallax.parallax_call(remote_hosts, "crm options set core.hosts '{}'".format(', '.join(value)))
+ crmsh.parallax.parallax_call(remote_hosts, "crm options set core.no_generating_ssh_key '{}'".format(
+ 'yes' if self._no_generating_ssh_key else 'no'
+ ))
+
+ def get(self, host):
+ return self._hosts_users[host]
+
+ def add(self, user, host):
+ self._hosts_users[host] = user
+
+ def set_no_generating_ssh_key(self, value: bool):
+ self._no_generating_ssh_key = value
+
+ def get_no_generating_ssh_key(self) -> bool:
+ return self._no_generating_ssh_key
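+
+
+# A minimal round trip (hypothetical host and user names):
+#
+#   cfg = HostUserConfig()
+#   cfg.add("alice", "node1")
+#   cfg.save_local()    # persists core.hosts = ['alice@node1', ...]
+#   cfg.get("node1")    # -> 'alice'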
+
+def parse_user_at_host(s: str):
+ i = s.find('@')
+ if i == -1:
+ return None, s
+ else:
+ return s[:i], s[i+1:]
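+
+
+# Examples of the split above:
+#
+#   parse_user_at_host("alice@node1")  # -> ('alice', 'node1')
+#   parse_user_at_host("node1")        # -> (None, 'node1')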
+
+
+def file_is_empty(file: str) -> bool:
+ return os.stat(file).st_size == 0
+
+
+def get_open_method(infile):
+ """
+ Get the appropriate file open method based on the file extension
+ """
+ file_type_open_dict = {
+ "gz": gzip.open,
+ "bz2": bz2.open,
+ "xz": lzma.open
+ }
+ file_ext = infile.split('.')[-1]
+ return file_type_open_dict.get(file_ext, open)
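+
+
+# Example of the extension dispatch (file names are illustrative):
+#
+#   get_open_method("report.tar.gz")  # -> gzip.open
+#   get_open_method("messages.xz")    # -> lzma.open
+#   get_open_method("pacemaker.log")  # -> plain built-in open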
+
+
+def read_from_file(infile: str) -> str:
+ """
+ Read content from a file
+ """
+ _open = get_open_method(infile)
+ try:
+ with _open(infile, 'rt', encoding='utf-8', errors='replace') as f:
+ data = f.read()
+ except Exception as err:
+ logger.error("When reading file \"%s\": %s", infile, str(err))
+ return ""
+
+ return data
+# vim:ts=4:sw=4:et:
diff --git a/crmsh/watchdog.py b/crmsh/watchdog.py
new file mode 100644
index 0000000..6d0d2cf
--- /dev/null
+++ b/crmsh/watchdog.py
@@ -0,0 +1,179 @@
+import re
+from . import utils
+from .constants import SSH_OPTION
+from .bootstrap import invoke, invokerc, WATCHDOG_CFG, SYSCONFIG_SBD
+from .sh import ShellUtils
+
+
+class Watchdog(object):
+ """
+ Class to find valid watchdog device name
+ """
+ QUERY_CMD = "sudo sbd query-watchdog"
+ DEVICE_FIND_REGREX = "\[[0-9]+\] (/dev/.*)\n.*\nDriver: (.*)"
+
+ def __init__(self, _input=None, remote_user=None, peer_host=None):
+ """
+ Init function
+ """
+ self._input = _input
+ self._remote_user = remote_user
+ self._peer_host = peer_host
+ self._watchdog_info_dict = {}
+ self._watchdog_device_name = None
+
+ @property
+ def watchdog_device_name(self):
+ return self._watchdog_device_name
+
+ @staticmethod
+ def _verify_watchdog_device(dev, ignore_error=False):
+ """
+ Use wdctl to verify watchdog device
+ """
+ rc, _, err = ShellUtils().get_stdout_stderr("wdctl {}".format(dev))
+ if rc != 0:
+ if ignore_error:
+ return False
+ else:
+ utils.fatal("Invalid watchdog device {}: {}".format(dev, err))
+ return True
+
+ @staticmethod
+ def _load_watchdog_driver(driver):
+ """
+ Load specific watchdog driver
+ """
+ invoke("echo {} > {}".format(driver, WATCHDOG_CFG))
+ invoke("systemctl restart systemd-modules-load")
+
+ @staticmethod
+ def _get_watchdog_device_from_sbd_config():
+ """
+ Try to get watchdog device name from sbd config file
+ """
+ conf = utils.parse_sysconfig(SYSCONFIG_SBD)
+ return conf.get("SBD_WATCHDOG_DEV")
+
+ @staticmethod
+ def _driver_is_loaded(driver):
+ """
+ Check if driver was already loaded
+ """
+ _, out, _ = ShellUtils().get_stdout_stderr("lsmod")
+ return re.search("\n{}\s+".format(driver), out)
+
+ def _set_watchdog_info(self):
+ """
+ Set watchdog info through sbd query-watchdog command
+ Content in self._watchdog_info_dict: {device_name: driver_name}
+ """
+ rc, out, err = ShellUtils().get_stdout_stderr(self.QUERY_CMD)
+ if rc == 0 and out:
+ # output format looks like:
+ # [1] /dev/watchdog\nIdentity: Software Watchdog\nDriver: softdog\n
+ self._watchdog_info_dict = dict(re.findall(self.DEVICE_FIND_REGEX, out))
+ else:
+ utils.fatal("Failed to run {}: {}".format(self.QUERY_CMD, err))
+
+ def _get_device_through_driver(self, driver_name):
+ """
+ Get watchdog device name which has driver_name
+ """
+ for device, driver in self._watchdog_info_dict.items():
+ if driver == driver_name and self._verify_watchdog_device(device):
+ return device
+ return None
+
+ def _get_driver_through_device_remotely(self, dev_name):
+ """
+ Given watchdog device name, get driver name on remote node
+ """
+ # FIXME
+ cmd = "ssh {} {}@{} {}".format(SSH_OPTION, self._remote_user, self._peer_host, self.QUERY_CMD)
+ rc, out, err = ShellUtils().get_stdout_stderr(cmd)
+ if rc == 0 and out:
+ # output format looks like:
+ # [1] /dev/watchdog\nIdentity: Software Watchdog\nDriver: softdog\n
+ device_driver_dict = dict(re.findall(self.DEVICE_FIND_REGEX, out))
+ if device_driver_dict and dev_name in device_driver_dict:
+ return device_driver_dict[dev_name]
+ else:
+ return None
+ else:
+ utils.fatal("Failed to run {} remotely: {}".format(self.QUERY_CMD, err))
+
+ def _get_first_unused_device(self):
+ """
+ Get first unused watchdog device name
+ """
+ for dev in self._watchdog_info_dict:
+ if self._verify_watchdog_device(dev, ignore_error=True):
+ return dev
+ return None
+
+ def _set_input(self):
+ """
+ If self._input was not provided by option:
+ 1. Try to get it from sbd config file
+ 2. Try to get the first valid device from result of sbd query-watchdog
+ 3. Set the self._input as softdog
+ """
+ if not self._input:
+ dev = self._get_watchdog_device_from_sbd_config()
+ if dev and self._verify_watchdog_device(dev, ignore_error=True):
+ self._input = dev
+ return
+ first_unused = self._get_first_unused_device()
+ self._input = first_unused if first_unused else "softdog"
+
+ def _valid_device(self, dev):
+ """
+ Check if dev is a valid and unused watchdog device
+ """
+ if dev in self._watchdog_info_dict and self._verify_watchdog_device(dev):
+ return True
+ return False
+
+ def join_watchdog(self):
+ """
+ In the join process, get the watchdog device from the config.
+ If that device does not exist, get the driver name from the init node and load that driver.
+ """
+ self._set_watchdog_info()
+
+ res = self._get_watchdog_device_from_sbd_config()
+ if not res:
+ utils.fatal("Failed to get watchdog device from {}".format(SYSCONFIG_SBD))
+ self._input = res
+
+ if not self._valid_device(self._input):
+ driver = self._get_driver_through_device_remotely(self._input)
+ self._load_watchdog_driver(driver)
+
+ def init_watchdog(self):
+ """
+ In init process, find valid watchdog device
+ """
+ self._set_watchdog_info()
+ self._set_input()
+
+ # self._input is a device name
+ if self._valid_device(self._input):
+ self._watchdog_device_name = self._input
+ return
+
+ # self._input is invalid, exit
+ if not invokerc("modinfo {}".format(self._input)):
+ utils.fatal("Should provide valid watchdog device or driver name by -w option")
+
+ # self._input is a driver name, load it if it was unloaded
+ if not self._driver_is_loaded(self._input):
+ self._load_watchdog_driver(self._input)
+ self._set_watchdog_info()
+
+ # self._input is a loaded driver name, find corresponding device name
+ res = self._get_device_through_driver(self._input)
+ if res:
+ self._watchdog_device_name = res
+ return
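+
+
+# A minimal usage sketch (device and driver names are illustrative):
+#
+#   w = Watchdog(_input="softdog")
+#   w.init_watchdog()          # loads softdog if needed, then finds its device
+#   w.watchdog_device_name     # e.g. '/dev/watchdog', or None if none found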
diff --git a/crmsh/xmlutil.py b/crmsh/xmlutil.py
new file mode 100644
index 0000000..ee3f96a
--- /dev/null
+++ b/crmsh/xmlutil.py
@@ -0,0 +1,1575 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2016 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import os
+import subprocess
+from lxml import etree, doctestcompare
+import copy
+import bz2
+from collections import defaultdict
+from tempfile import mktemp
+
+from . import config, sh
+from . import options
+from . import schema
+from . import constants
+from . import userdir
+from .sh import ShellUtils
+from .utils import add_sudo, str2file, str2tmp, get_boolean, handle_role_for_ocf_1_1, copy_local_file, rmfile
+from .utils import stdout2list, crm_msec, crm_time_cmp
+from .utils import olist, get_cib_in_use, get_tempdir, to_ascii, is_boolean_true
+from . import log
+
+
+logger = log.setup_logger(__name__)
+logger_utils = log.LoggerUtils(logger)
+
+
+def xmlparse(f):
+ try:
+ cib_elem = etree.parse(f).getroot()
+ except Exception as msg:
+ logger.error("cannot parse xml: %s", msg)
+ return None
+ return cib_elem
+
+
+def file2cib_elem(s):
+ cib_tmp_copy = ''
+ try:
+ f = open(s, 'r')
+ except IOError as msg:
+ logger.debug("{} tried to read cib.xml, but : {}".format(userdir.getuser(), msg))
+ cib_tmp_copy = mktemp(suffix=".cib.xml")
+
+ if cib_tmp_copy != '':
+ logger.debug("{} gonna try it with sudo".format(userdir.getuser()))
+ # Actually it's not trying to open the file with sudo,
+ # but copying the file with sudo. We do copy,
+ # because xmlparse function requires the function descriptor not the plain text
+ # and this would be so much work to redo it.
+ # It's not too bad, but it's still a workaround and better be refactored, so FIXME!
+ copy_local_file(s, cib_tmp_copy)
+ f = open(cib_tmp_copy, 'r')
+ logger.debug("{} successfully read the cib.xml".format(userdir.getuser()))
+
+ cib_elem = xmlparse(f)
+ f.close()
+ if cib_tmp_copy != '':
+ rmfile(cib_tmp_copy)
+ if options.regression_tests and cib_elem is None:
+ print("Failed to read CIB from file: %s" % (s))
+ return cib_elem
+
+
+def compressed_file_to_cib(s):
+ try:
+ if s.endswith('.bz2'):
+ f = bz2.BZ2File(s)
+ elif s.endswith('.gz'):
+ import gzip
+ f = gzip.open(s)
+ else:
+ f = open(s)
+ except IOError as msg:
+ logger.error(msg)
+ return None
+ cib_elem = xmlparse(f)
+ if options.regression_tests and cib_elem is None:
+ print("Failed to read CIB from file %s" % (s))
+ f.seek(0)
+ print(f.read())
+ f.close()
+ return cib_elem
+
+
+cib_dump = "cibadmin -Ql"
+
+
+def sudocall(cmd):
+ cmd = add_sudo(cmd)
+ if options.regression_tests:
+ print(".EXT", cmd)
+ p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ try:
+ outp, errp = p.communicate()
+ p.wait()
+ return p.returncode, to_ascii(outp), to_ascii(errp)
+ except IOError as msg:
+ logger.error("running %s: %s", cmd, msg)
+ return None, None, None
+
+
+def cibdump2file(fname):
+ _, outp, _ = sudocall(cib_dump)
+ if outp is not None:
+ return str2file(outp, fname)
+ return None
+
+
+def cibdump2tmp():
+ try:
+ _, outp, _ = sudocall(cib_dump)
+ if outp is not None:
+ return str2tmp(outp)
+ except IOError as msg:
+ logger.error(msg)
+ return None
+
+
+def text2elem(text):
+ """
+ Convert a text format CIB to
+ an XML tree.
+ """
+ try:
+ return etree.fromstring(text)
+ except Exception as err:
+ logger_utils.text_xml_parse_err(err, text)
+ return None
+
+
+def cibdump2elem(section=None):
+ if section:
+ cmd = "%s -o %s" % (cib_dump, section)
+ else:
+ cmd = cib_dump
+ rc, outp, errp = sudocall(cmd)
+ if rc == 0:
+ return text2elem(outp)
+ else:
+ logger.error("running %s: %s", cmd, errp)
+ return None
+
+
+def read_cib(fun, params=None):
+ cib_elem = fun(params)
+ if cib_elem is None or cib_elem.tag != "cib":
+ return None
+ return cib_elem
+
+
+def sanity_check_nvpairs(ident, node, attr_list):
+ rc = 0
+ for nvpair in node.iterchildren("nvpair"):
+ n = nvpair.get("name")
+ if n and n not in attr_list:
+ logger.warning("%s: unknown attribute '%s'", ident, n)
+ rc |= 1
+ return rc
+
+
+def sanity_check_meta(ident, node, attr_list):
+ rc = 0
+ if node is None or not attr_list:
+ return rc
+ for c in node.iterchildren():
+ if c.tag == "meta_attributes":
+ rc |= sanity_check_nvpairs(ident, c, attr_list)
+ return rc
+
+
+def get_interesting_nodes(node, nodes_l):
+ '''
+ All nodes which can be represented as CIB objects.
+ '''
+ for c in node.iterchildren():
+ if is_cib_element(c):
+ nodes_l.append(c)
+ get_interesting_nodes(c, nodes_l)
+ return nodes_l
+
+
+def get_top_cib_nodes(node, nodes_l):
+ '''
+ All nodes which can be represented as CIB objects, but not
+ nodes which are children of other CIB objects.
+ '''
+ for c in node.iterchildren():
+ if is_cib_element(c):
+ nodes_l.append(c)
+ else:
+ get_top_cib_nodes(c, nodes_l)
+ return nodes_l
+
+
+class RscState(object):
+ '''
+ Get the resource status and some other relevant bits.
+ In particular, this class should allow for a bit of caching
+ of cibadmin -Q -o resources output in case we need to check
+ more than one resource in a row.
+ '''
+
+ rsc_status = "crm_resource -W -r '%s'"
+
+ def __init__(self):
+ self.current_cib = None
+ self.rsc_elem = None
+ self.prop_elem = None
+ self.rsc_dflt_elem = None
+
+ def _init_cib(self):
+ cib = cibdump2elem("configuration")
+ self.current_cib = cib
+ self.rsc_elem = get_first_conf_elem(cib, "resources")
+ self.prop_elem = get_first_conf_elem(cib, "crm_config/cluster_property_set")
+ self.rsc_dflt_elem = get_first_conf_elem(cib, "rsc_defaults/meta_attributes")
+
+ def rsc2node(self, ident):
+ '''
+ Get a resource XML element given the id.
+ NB: this is called from almost all other methods.
+ Hence we initialize the cib here. CIB reading is
+ expensive.
+ '''
+ if self.rsc_elem is None:
+ self._init_cib()
+ if self.rsc_elem is None:
+ return None
+ # does this need to be optimized?
+ expr = './/*[@id="%s"]' % ident
+ try:
+ return self.rsc_elem.xpath(expr)[0]
+ except (IndexError, AttributeError):
+ return None
+
+ def is_ms_or_promotable_clone(self, ident):
+ '''
+ Test if the resource is master-slave or a promotable clone.
+ '''
+ rsc_node = self.rsc2node(ident)
+ if rsc_node is None:
+ return False
+ return is_ms_or_promotable_clone(rsc_node)
+
+ def rsc_clone(self, ident):
+ '''
+ Return id of the clone/ms containing this resource
+ or None if it's not cloned.
+ '''
+ rsc_node = self.rsc2node(ident)
+ if rsc_node is None:
+ return None
+ pnode = rsc_node.getparent()
+ if pnode is None:
+ return None
+ if is_group(pnode):
+ pnode = pnode.getparent()
+ if is_clonems(pnode):
+ return pnode.get("id")
+ return None
+
+ def is_managed(self, ident):
+ '''
+ Is this resource managed?
+ '''
+ rsc_node = self.rsc2node(ident)
+ if rsc_node is None:
+ return False
+ # maintenance-mode, if true, overrides all
+ attr = get_attr_value(self.prop_elem, "maintenance-mode")
+ if attr and is_xs_boolean_true(attr):
+ return False
+ # then check the rsc is-managed meta attribute
+ rsc_meta_node = get_rsc_meta_node(rsc_node)
+ attr = get_attr_value(rsc_meta_node, "is-managed")
+ if attr:
+ return is_xs_boolean_true(attr)
+ # then rsc_defaults is-managed attribute
+ attr = get_attr_value(self.rsc_dflt_elem, "is-managed")
+ if attr:
+ return is_xs_boolean_true(attr)
+ # finally the is-managed-default property
+ attr = get_attr_value(self.prop_elem, "is-managed-default")
+ if attr:
+ return is_xs_boolean_true(attr)
+ return True
+
+ def is_running(self, ident):
+ '''
+ Is this resource running?
+ '''
+ if not is_live_cib():
+ return False
+ test_id = self.rsc_clone(ident) or ident
+ rc, outp = ShellUtils().get_stdout(self.rsc_status % test_id, stderr_on=False)
+ return outp.find("running") > 0 and outp.find("NOT") == -1
+
+ def is_group(self, ident):
+ '''
+ Test if the resource is a group
+ '''
+ rsc_node = self.rsc2node(ident)
+ if rsc_node is None:
+ return False
+ return is_group(rsc_node)
+
+ def can_delete(self, ident):
+ '''
+ Can a resource be deleted?
+ The order below is important!
+ '''
+ return not (self.is_running(ident) and not self.is_group(ident) and self.is_managed(ident))
+
+
+def resources_xml():
+ return cibdump2elem("resources")
+
+
+def is_normal_node(n):
+ return n.tag == "node" and (n.get("type") in (None, "normal", "member", ""))
+
+
+def unique_ra(typ, klass, provider):
+ """
+ Unique:
+ * it's explicitly ocf:heartbeat:
+ * no explicit class or provider
+ * only one provider (heartbeat counts as one provider)
+ Not unique:
+ * class is not ocf
+ * multiple providers
+ """
+ if klass is None and provider is None:
+ return True
+ return klass == 'ocf' and (provider is None or provider == 'heartbeat')
+
+
+def mk_rsc_type(n):
+ """
+ Returns prefixless for unique RAs
+ """
+ ra_type = n.get("type")
+ ra_class = n.get("class")
+ ra_provider = n.get("provider")
+ if unique_ra(ra_type, ra_class, ra_provider):
+ ra_class = None
+ ra_provider = None
+ s1 = s2 = ''
+ if ra_class:
+ s1 = "%s:" % ra_class
+ if ra_provider:
+ s2 = "%s:" % ra_provider
+ return ''.join((s1, s2, ra_type))
+
+
+def listnodes(include_remote_nodes=True):
+ cib = cibdump2elem()
+ if cib is None:
+ return []
+ local_nodes = cib.xpath('/cib/configuration/nodes/node/@uname')
+ if include_remote_nodes:
+ remote_nodes = cib.xpath('/cib/status/node_state[@remote_node="true"]/@uname')
+ else:
+ remote_nodes = []
+ return list(set([n for n in local_nodes + remote_nodes if n]))
+
+
+def is_our_node(s):
+ '''
+ Check if s is in a list of our nodes (ignore case).
+ This is not fast, perhaps should be cached.
+
+ Includes remote nodes as well
+ '''
+ for n in listnodes():
+ if n.lower() == s.lower():
+ return True
+ return False
+
+
+def is_remote_node(n):
+ cib = cibdump2elem()
+ if cib is None:
+ return False
+ remote_nodes = cib.xpath('/cib/status/node_state[@remote_node="true"]/@uname')
+ return any(n == r for r in remote_nodes if r)
+
+
+def is_live_cib():
+ '''Are we working with the live cluster?'''
+ return not get_cib_in_use() and not os.getenv("CIB_file")
+
+
+def is_crmuser():
+ crmusers = ("root", config.path.crm_daemon_user)
+ return config.core.user in crmusers or userdir.getuser() in crmusers
+
+
+def cib_shadow_dir():
+ if os.getenv("CIB_shadow_dir"):
+ return os.getenv("CIB_shadow_dir")
+ if is_crmuser():
+ return config.path.crm_config
+ home = userdir.gethomedir(config.core.user)
+ if home and home.startswith(os.path.sep):
+ return os.path.join(home, ".cib")
+ return get_tempdir()
+
+
+def listshadows():
+ d = cib_shadow_dir()
+ if not os.path.isdir(d):
+ return []
+ rc, l = stdout2list("ls %s | fgrep shadow. | sed 's/^shadow\\.//'" % d)
+ return l
+
+
+def shadowfile(name):
+ return "%s/shadow.%s" % (cib_shadow_dir(), name)
+
+
+def pe2shadow(pe_file, name):
+ '''Copy a PE file (or any CIB file) to a shadow.'''
+ try:
+ bits = open(pe_file, 'rb').read()
+ except IOError as msg:
+ logger.error("open: %s", msg)
+ return False
+ # decompress if it ends with .bz2
+ if pe_file.endswith(".bz2"):
+ bits = bz2.decompress(bits)
+ # copy input to the shadow
+ try:
+ open(shadowfile(name), "wb").write(bits)
+ except IOError as msg:
+ logger.error("open: %s", msg)
+ return False
+ return True
+
+
+def is_xs_boolean_true(b):
+ return b.lower() in ("true", "1")
+
+
+def cloned_el(node):
+ for c in node.iterchildren():
+ if is_resource(c):
+ return c.tag
+
+
+def get_topmost_rsc(node):
+ '''
+ Return a topmost node which is a resource and contains this resource
+ '''
+ if is_container(node.getparent()):
+ return get_topmost_rsc(node.getparent())
+ return node
+
+
+attr_defaults = {
+ "rule": (("boolean-op", "and"),),
+ "expression": (("type", "string"),),
+}
+
+
+def drop_attr_defaults(node, ts=0):
+ try:
+ for defaults in attr_defaults[node.tag]:
+ if node.get(defaults[0]) == defaults[1]:
+ del node.attrib[defaults[0]]
+ except:
+ pass
+
+
+def nameandid(e, level):
+ if e.tag:
+ print(level*' ', e.tag, e.get("id"), e.get("name"))
+
+
+def xmltraverse(e, fun, ts=0):
+ for c in e.iterchildren():
+ fun(c, ts)
+ xmltraverse(c, fun, ts+1)
+
+
+def xmltraverse_thin(e, fun, ts=0):
+ '''
+ Skip elements which may be resources themselves.
+ NB: Call this only on resource (or constraint) nodes, but
+ never on cib or configuration!
+ '''
+ for c in e.iterchildren():
+ if c.tag not in ('primitive', 'group'):
+ xmltraverse_thin(c, fun, ts+1)
+ fun(e, ts)
+
+
+def xml_processnodes(e, node_filter, proc):
+ '''
+ Process with proc all nodes that match filter.
+ '''
+ node_list = []
+ for child in e.iterchildren():
+ if node_filter(child):
+ node_list.append(child)
+ if len(child) > 0:
+ xml_processnodes(child, node_filter, proc)
+ if node_list:
+ proc(node_list)
+
+
+# filter the cib
+def true(e):
+ 'Just return True.'
+ return True
+
+
+def is_entity(e):
+ return e.tag == etree.Entity
+
+
+def is_comment(e):
+ return e.tag == etree.Comment
+
+
+def is_status_node(e):
+ return e.tag == "status"
+
+
+def is_emptyelem(node, tag_l):
+ if node.tag in tag_l:
+ for a in constants.precious_attrs:
+ if node.get(a):
+ return False
+ for n in node.iterchildren():
+ return False
+ return True
+ else:
+ return False
+
+
+def is_emptynvpairs(node):
+ return is_emptyelem(node, constants.nvpairs_tags)
+
+
+def is_emptyops(node):
+ return is_emptyelem(node, ("operations",))
+
+
+def is_cib_element(node):
+ return node.tag in constants.cib_cli_map
+
+
+def is_group(node):
+ return node.tag == "group"
+
+
+def is_attr_set(node, attr):
+ return get_attr_value(get_child_nvset_node(node), attr) is not None
+
+
+def is_ms_or_promotable_clone(node):
+ is_promotable_type = is_boolean_true(is_attr_set(node, "promotable"))
+ is_ms_type = node.tag in ("master", "ms")
+ return is_ms_type or is_promotable_type
+
+
+def is_clone(node):
+ return node.tag == "clone"
+
+
+def is_clonems(node):
+ return node.tag in constants.clonems_tags
+
+
+def is_cloned(node):
+ return (node.getparent().tag in constants.clonems_tags or
+ (node.getparent().tag == "group" and
+ node.getparent().getparent().tag in constants.clonems_tags))
+
+
+def is_container(node):
+ return node.tag in constants.container_tags
+
+
+def is_primitive(node):
+ return node.tag == "primitive"
+
+
+def is_resource(node):
+ return node.tag in constants.resource_tags
+
+
+def is_template(node):
+ return node.tag == "template"
+
+
+def is_child_rsc(node):
+ return node.tag in constants.children_tags
+
+
+def is_constraint(node):
+ return node.tag in constants.constraint_tags
+
+
+def is_defaults(node):
+ return node.tag in constants.defaults_tags
+
+
+def rsc_constraint(rsc_id, con_elem):
+ for attr in list(con_elem.keys()):
+ if attr in constants.constraint_rsc_refs \
+ and rsc_id == con_elem.get(attr):
+ return True
+ for rref in con_elem.xpath("resource_set/resource_ref"):
+ if rsc_id == rref.get("id"):
+ return True
+ return False
+
+
+def is_related(rsc_id, node):
+ """
+ checks if the given node is an element
+ that has a direct relation to rsc_id. That is,
+ if it contains it, if it references it...
+ """
+ if is_constraint(node) and rsc_constraint(rsc_id, node):
+ return True
+ if node.tag == 'tag':
+ if len(node.xpath('.//obj_ref[@id="%s"]' % (rsc_id))) > 0:
+ return True
+ return False
+ if is_container(node):
+ for tag in ('primitive', 'group', 'clone', 'master'):
+ if len(node.xpath('.//%s[@id="%s"]' % (tag, rsc_id))) > 0:
+ return True
+ return False
+ return False
+
+
+def sort_container_children(e_list):
+ '''
+ Make sure that attribute nodes come first, followed by the
+ element children (primitive/group). The relative order of the
+ elements is preserved; they are just shifted to the end!
+ '''
+ for node in e_list:
+ children = [x for x in node.iterchildren()
+ if x.tag in constants.children_tags]
+ for c in children:
+ node.remove(c)
+ for c in children:
+ node.append(c)
+
+
+def rmnode(e):
+ if e is not None and e.getparent() is not None:
+ e.getparent().remove(e)
+
+
+def rmnodes(e_list):
+ for e in e_list:
+ rmnode(e)
+
+
+def printid(e_list):
+ for e in e_list:
+ ident = e.get("id")
+ if ident:
+ print("element id:", ident)
+
+
+def remove_dflt_attrs(e_list):
+ '''
+ Drop optional attributes which are already set to default
+ '''
+ for e in e_list:
+ try:
+ d = constants.attr_defaults[e.tag]
+ for a in list(d.keys()):
+ if e.get(a) == d[a]:
+ del e.attrib[a]
+ except:
+ pass
+
+
+def remove_text(e_list):
+ for e in e_list:
+ if not is_comment(e):
+ e.text = None
+ e.tail = None
+
+
+def sanitize_cib(doc):
+ xml_processnodes(doc, is_status_node, rmnodes)
+ # xml_processnodes(doc, true, printid)
+ # xml_processnodes(doc, is_emptynvpairs, rmnodes)
+ # xml_processnodes(doc, is_emptyops, rmnodes)
+ xml_processnodes(doc, is_entity, rmnodes)
+ # xml_processnodes(doc, is_comment, rmnodes)
+ xml_processnodes(doc, is_container, sort_container_children)
+ xml_processnodes(doc, true, remove_dflt_attrs)
+ xml_processnodes(doc, true, remove_text)
+ xmltraverse(doc, drop_attr_defaults)
+
+
+def sanitize_cib_for_patching(doc):
+ """
+ Custom version of sanitize_cib which
+ doesn't sort container children, to use
+ for processing the original CIB when
+ generating a patch to apply using crm_diff.
+ """
+ xml_processnodes(doc, is_status_node, rmnodes)
+ xml_processnodes(doc, is_entity, rmnodes)
+ xml_processnodes(doc, true, remove_dflt_attrs)
+ xml_processnodes(doc, true, remove_text)
+
+
+def is_simpleconstraint(node):
+ return len(node.xpath("resource_set/resource_ref")) == 0
+
+
+match_list = defaultdict(tuple,
+ {"node": ("uname",),
+ "nvpair": ("name",),
+ "op": ("name", "interval"),
+ "rule": ("score", "score-attribute", "role"),
+ "expression": ("attribute", "operation", "value"),
+ "fencing-level": ("target", "devices"),
+ "alert": ("path",),
+ "recipient": ("value",)})
+
+
+def add_comment(e, s):
+ '''
+ Add comment s to e from doc.
+ '''
+ if e is None or not s:
+ return
+ comm_elem = etree.Comment(s)
+ firstelem_idx = 0
+ for c in e.iterchildren():
+ firstelem_idx = e.index(c)
+ break
+ e.insert(firstelem_idx, comm_elem)
+
+
+def stuff_comments(node, comments):
+ if not comments:
+ return
+ for s in reversed(comments):
+ add_comment(node, s)
+
+
+def fix_comments(e):
+ 'Make sure that comments start with #'
+ celems = [x for x in e.iterchildren() if is_comment(x)]
+ for c in celems:
+ c.text = c.text.strip()
+ if not c.text.startswith("#"):
+ c.text = "# %s" % c.text
+
+
+def set_id_used_attr(e):
+ e.set("__id_used", "Yes")
+
+
+def is_id_used_attr(e):
+ return e.get("__id_used") == "Yes"
+
+
+def remove_id_used_attr(e, lvl):
+ if is_id_used_attr(e):
+ del e.attrib["__id_used"]
+
+
+def remove_id_used_attributes(e):
+ if e is not None:
+ xmltraverse(e, remove_id_used_attr)
+
+
+def lookup_node(node, oldnode, location_only=False, ignore_id=False):
+ '''
+ Find a child of oldnode which matches node.
+ This is used to "harvest" existing ids in order to prevent
+ irrelevant changes to the XML code.
+ The list of attributes to match is in the dictionary
+ match_list.
+ The "id" attribute is treated differently. In case the new node
+ (the first parameter here) contains the id, then the "id"
+ attribute is added to the match list.
+ '''
+ if oldnode is None:
+ return None
+ attr_list = list(match_list[node.tag])
+ if not ignore_id and node.get("id"):
+ attr_list.append("id")
+ for c in oldnode.iterchildren():
+ if not location_only and is_id_used_attr(c):
+ continue
+ if node.tag == c.tag:
+ for a in attr_list:
+ if node.get(a) != c.get(a):
+ break
+ else:
+ return c
+ return None
+
+
+def find_operation(rsc_node, name, interval=None):
+ '''
+ Setting interval to "non-0" means get the first op with interval
+ different from 0.
+ Not setting interval at all means get the only matching op, or the
+ 0 op (if any)
+ '''
+ matching_name = []
+ for ops in rsc_node.findall("operations"):
+ matching_name.extend([op for op in ops.iterchildren("op")
+ if op.get("name") == name])
+ if interval is None and len(matching_name) == 1:
+ return matching_name[0]
+ interval = interval or "0"
+ for op in matching_name:
+ opint = op.get("interval")
+ if interval == "non-0" and crm_msec(opint) > 0:
+ return op
+ if crm_time_cmp(opint, interval) == 0:
+ return op
+ return None
+
+
+def get_op_timeout(rsc_node, op, default_timeout):
+ interval = (op == "monitor" and "non-0" or "0")
+ op_n = find_operation(rsc_node, op == "probe" and "monitor" or op, interval)
+ timeout = op_n is not None and op_n.get("timeout") or default_timeout
+ return crm_msec(timeout)
+
+
+def op2list(node):
+ pl = []
+ action = ""
+ for name in list(node.keys()):
+ if name == "name":
+ action = node.get(name)
+ elif name != "id": # skip the id
+ pl.append([name, node.get(name)])
+ if not action:
+ logger.error("op is invalid (no name)")
+ return action, pl
+
+
+def get_rsc_operations(rsc_node):
+ actions = [op2list(op) for op in rsc_node.xpath('.//operations/op')]
+ actions = [[op, pl] for op, pl in actions if op]
+ return actions
+
+
+# lower score = earlier sort
+def make_sort_map(*order):
+ m = {}
+ for i, o in enumerate(order):
+ if isinstance(o, str):
+ m[o] = i
+ else:
+ for k in o:
+ m[k] = i
+ return m
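+
+
+# Example of the score mapping (a shorter order list, for illustration):
+#
+#   make_sort_map('node', ['clone', 'master'], 'op')
+#   # -> {'node': 0, 'clone': 1, 'master': 1, 'op': 2}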
+
+
+_sort_xml_order = make_sort_map('node',
+ 'template', 'primitive', 'bundle', 'group', 'master', 'clone', 'op',
+ 'tag',
+ ['rsc_location', 'rsc_colocation', 'rsc_order'],
+ ['rsc_ticket', 'fencing-topology'],
+ 'cluster_property_set', 'rsc_defaults', 'op_defaults',
+ 'acl_role', ['acl_target', 'acl_group', 'acl_user'],
+ 'alert')
+
+_sort_cli_order = make_sort_map('node',
+ 'rsc_template', 'primitive', 'bundle', 'group',
+ ['ms', 'master'], 'clone', 'op',
+ 'tag',
+ ['location', 'colocation', 'collocation', 'order'],
+ ['rsc_ticket', 'fencing_topology'],
+ 'property', 'rsc_defaults', 'op_defaults',
+ 'role', ['acl_target', 'acl_group', 'user'],
+ 'alert')
+
+_SORT_LAST = 1000
+
+
+def processing_sort(nl):
+ '''
+ It's usually important to process cib objects in this order,
+ i.e. simple objects first.
+
+ TODO: if sort_elements is disabled, only sort to resolve inter-dependencies.
+ '''
+ def sort_elements(k):
+ # tie-break on id for a stable ordering, mirroring processing_sort_cli
+ return _sort_xml_order.get(k.tag, _SORT_LAST), k.get("id") or ''
+
+ def sort_type(k):
+ return _sort_xml_order.get(k.tag, _SORT_LAST)
+
+ return sorted(nl, key=sort_elements if config.core.sort_elements else sort_type)
+
+
+def processing_sort_cli(nl):
+ '''
+ nl: list of objects (CibObject)
+ Returns the given list in order
+
+ TODO: if sort_elements is disabled, only sort to resolve inter-dependencies.
+ '''
+ def sort_elements(k):
+ return _sort_cli_order.get(k.obj_type, _SORT_LAST), k.obj_id
+
+ def sort_type(k):
+ return _sort_cli_order.get(k.obj_type, _SORT_LAST)
+
+ return sorted(nl, key=sort_elements if config.core.sort_elements else sort_type)
+
+
+def is_resource_cli(s):
+ return s in olist(constants.resource_cli_names)
+
+
+def is_constraint_cli(s):
+ return s in olist(constants.constraint_cli_names)
+
+
+def referenced_resources(node):
+ if not is_constraint(node):
+ return []
+ xml_obj_type = node.tag
+ rsc_list = []
+ if xml_obj_type == "rsc_location" and node.get("rsc"):
+ rsc_list = [node.get("rsc")]
+ elif node.xpath("resource_set/resource_ref"):
+ # resource sets
+ rsc_list = [x.get("id")
+ for x in node.xpath("resource_set/resource_ref")]
+ elif xml_obj_type == "rsc_colocation":
+ rsc_list = [node.get("rsc"), node.get("with-rsc")]
+ elif xml_obj_type == "rsc_order":
+ rsc_list = [node.get("first"), node.get("then")]
+ elif xml_obj_type == "rsc_ticket":
+ rsc_list = [node.get("rsc")]
+ return [rsc for rsc in rsc_list if rsc is not None]
+
+
+def rename_id(node, old_id, new_id):
+ if node.get("id") == old_id:
+ node.set("id", new_id)
+
+
+def rename_rscref_simple(c_obj, old_id, new_id):
+ c_modified = False
+ for attr in list(c_obj.node.keys()):
+ if attr in constants.constraint_rsc_refs and \
+ c_obj.node.get(attr) == old_id:
+ c_obj.node.set(attr, new_id)
+ c_obj.updated = True
+ c_modified = True
+ return c_modified
+
+
+def delete_rscref_simple(c_obj, rsc_id):
+ c_modified = False
+ for attr in list(c_obj.node.keys()):
+ if attr in constants.constraint_rsc_refs and \
+ c_obj.node.get(attr) == rsc_id:
+ del c_obj.node.attrib[attr]
+ c_obj.updated = True
+ c_modified = True
+ return c_modified
+
+
+def rset_uniq(c_obj, d):
+ '''
+ Drop duplicate resource references.
+ '''
+ l = []
+ for rref in c_obj.node.xpath("resource_set/resource_ref"):
+ rsc_id = rref.get("id")
+ if d[rsc_id] > 1:
+ # drop one
+ l.append(rref)
+ d[rsc_id] -= 1
+ rmnodes(l)
+
+
+def delete_rscref_rset(c_obj, rsc_id):
+ '''
+ Drop all reference to rsc_id.
+ '''
+ c_modified = False
+ l = []
+ for rref in c_obj.node.xpath("resource_set/resource_ref"):
+ if rsc_id == rref.get("id"):
+ l.append(rref)
+ c_obj.updated = True
+ c_modified = True
+ rmnodes(l)
+ l = []
+ cnt = 0
+ nonseq_rset = False
+ for rset in c_obj.node.findall("resource_set"):
+ rref_cnt = len(rset.findall("resource_ref"))
+ if rref_cnt == 0:
+ l.append(rset)
+ c_obj.updated = True
+ c_modified = True
+ elif not get_boolean(rset.get("sequential"), True) and rref_cnt > 1:
+ nonseq_rset = True
+ cnt += rref_cnt
+ rmnodes(l)
+ if not nonseq_rset and cnt == 2:
+ rset_convert(c_obj)
+ return c_modified
+
+
+def rset_convert(c_obj):
+ l = c_obj.node.xpath("resource_set/resource_ref")
+ if len(l) != 2:
+ return  # unexpected: not exactly two resource references
+ rsetcnt = 0
+ for rset in c_obj.node.findall("resource_set"):
+ # in case there are multiple non-sequential sets
+ if rset.get("sequential"):
+ del rset.attrib["sequential"]
+ rsetcnt += 1
+ c_obj.modified = True
+ cli = c_obj.repr_cli(format_mode=-1)
+ cli = cli.replace("_rsc_set_ ", "")
+ newnode = c_obj.cli2node(cli)
+ if newnode is not None:
+ c_obj.node.getparent().replace(c_obj.node, newnode)
+ c_obj.node = newnode
+ if rsetcnt == 1 and c_obj.obj_type == "colocation":
+ # exchange the elements in colocations
+ rsc = newnode.get("rsc")
+ with_rsc = newnode.get("with-rsc")
+ if with_rsc is not None:
+ newnode.set("rsc", with_rsc)
+ if rsc is not None:
+ newnode.set("with-rsc", rsc)
+
+
+def rename_rscref_rset(c_obj, old_id, new_id):
+ c_modified = False
+ d = {}
+ for rref in c_obj.node.xpath("resource_set/resource_ref"):
+ rsc_id = rref.get("id")
+ if rsc_id == old_id:
+ rref.set("id", new_id)
+ rsc_id = new_id
+ c_obj.updated = True
+ c_modified = True
+ if rsc_id not in d:
+ d[rsc_id] = 1
+ else:
+ d[rsc_id] += 1
+ rset_uniq(c_obj, d)
+ # if only two resource references remained then, to preserve
+ # sanity, convert it to a simple constraint (sigh)
+ cnt = 0
+ for key in d:
+ cnt += d[key]
+ if cnt == 2:
+ rset_convert(c_obj)
+ return c_modified
+
+
+def rename_rscref(c_obj, old_id, new_id):
+ if rename_rscref_simple(c_obj, old_id, new_id) or \
+ rename_rscref_rset(c_obj, old_id, new_id):
+ logger.info("modified %s from %s to %s", str(c_obj), old_id, new_id)
+
+
+def delete_rscref(c_obj, rsc_id):
+ return delete_rscref_simple(c_obj, rsc_id) or \
+ delete_rscref_rset(c_obj, rsc_id)
+
+
+def silly_constraint(c_node, rsc_id):
+ '''
+ Check whether a constraint is "silly": it constrains rsc_id
+ only against itself, or is otherwise invalid.
+ '''
+ if c_node.xpath("resource_set/resource_ref"):
+ # it's a resource set
+ # the resource sets have already been uniq-ed
+ cnt = len(c_node.xpath("resource_set/resource_ref"))
+ if c_node.tag in ("rsc_location", "rsc_ticket"): # locations and tickets are never silly
+ return cnt < 1
+ return cnt <= 1
+ cnt = 0 # total count of referenced resources must be at least two
+ rsc_cnt = 0
+ for attr in list(c_node.keys()):
+ if attr in constants.constraint_rsc_refs:
+ cnt += 1
+ if c_node.get(attr) == rsc_id:
+ rsc_cnt += 1
+ if c_node.tag in ("rsc_location", "rsc_ticket"): # locations and tickets are never silly
+ return cnt < 1
+ else:
+ return rsc_cnt == 2 or cnt < 2
+
+
+def is_climove_location(node):
+ 'Figure out if the location was created by crm resource move.'
+ rule_l = node.findall("rule")
+ expr_l = node.xpath(".//expression")
+ return len(rule_l) == 1 and len(expr_l) == 1 and \
+ node.get("id").startswith("cli-") and \
+ expr_l[0].get("attribute") == "#uname" and \
+ expr_l[0].get("operation") == "eq"
+
+
+def is_pref_location(node):
+ 'Figure out if the location is a node preference.'
+ rule_l = node.findall("rule")
+ expr_l = node.xpath(".//expression")
+ return len(rule_l) == 1 and len(expr_l) == 1 and \
+ expr_l[0].get("attribute") == "#uname" and \
+ expr_l[0].get("operation") == "eq"
+
+
+def get_rsc_ref_ids(node):
+ return [x.get("id")
+ for x in node.xpath("./resource_ref")]
+
+
+def get_rsc_children_ids(node):
+ return [x.get("id")
+ for x in node.iterchildren() if is_child_rsc(x)]
+
+
+def get_prim_children_ids(node):
+ l = [x for x in node.iterchildren() if is_child_rsc(x)]
+ if len(l) and l[0].tag == "group":
+ l = [x for x in l[0].iterchildren() if is_child_rsc(x)]
+ return [x.get("id") for x in l]
+
+
+def get_child_nvset_node(node, attr_set="meta_attributes"):
+ if node is None:
+ return None
+ for c in node.iterchildren():
+ if c.tag != attr_set:
+ continue
+ return c
+ return None
+
+
+def get_rscop_defaults_meta_node(node):
+ return get_child_nvset_node(node)
+
+
+def get_rsc_meta_node(node):
+ return get_child_nvset_node(node)
+
+
+def get_properties_node(node):
+ return get_child_nvset_node(node, attr_set="cluster_property_set")
+
+
+def new_cib():
+ cib_elem = etree.Element("cib")
+ conf_elem = etree.SubElement(cib_elem, "configuration")
+ for name in schema.get('sub', "configuration", 'r'):
+ etree.SubElement(conf_elem, name)
+ return cib_elem
+
+
+def get_conf_elems(cib_elem, path):
+ '''
+ Get a list of configuration elements. All elements are within
+ /configuration
+ '''
+ if cib_elem is None:
+ return None
+ return cib_elem.xpath("//configuration/%s" % path)
+
+
+def get_first_conf_elem(cib_elem, path):
+ try:
+ elems = get_conf_elems(cib_elem, path)
+ return elems[0] if elems else None
+ except IndexError:
+ return None
+
+
+def get_topnode(cib_elem, tag):
+ "Get configuration element or create/append if there's none."
+ conf_elem = cib_elem.find("configuration")
+ if conf_elem is None:
+ logger.error("no configuration element found!")
+ return None
+ if tag == "configuration":
+ return conf_elem
+ e = cib_elem.find("configuration/%s" % tag)
+ if e is None:
+ logger.debug("create configuration section %s", tag)
+ e = etree.SubElement(conf_elem, tag)
+ return e
+
+
+def get_attr_in_set(e, attr):
+ if e is None:
+ return None
+ for c in e.iterchildren("nvpair"):
+ if c.get("name") == attr:
+ return c
+ return None
+
+
+def get_attr_value(e, attr):
+ try:
+ return get_attr_in_set(e, attr).get("value")
+ except:
+ return None
+
+
+def set_attr(e, attr, value):
+ '''
+ Set an attribute in the attribute set.
+ '''
+ nvp = get_attr_in_set(e, attr)
+ if nvp is None:
+ from . import idmgmt
+ nvp = etree.SubElement(e, "nvpair", id="", name=attr, value=value)
+ nvp.set("id", idmgmt.new(nvp, e.get("id")))
+ else:
+ nvp.set("name", attr)
+ nvp.set("value", value)
+
+
+def get_set_nodes(e, setname, create=False):
+ """Return the attributes set nodes (create one if requested)
+ setname can for example be meta_attributes
+ """
+ l = [c for c in e.iterchildren(setname)]
+ if l:
+ return l
+ if create:
+ from . import idmgmt
+ elem = etree.SubElement(e, setname, id="")
+ elem.set("id", idmgmt.new(elem, e.get("id")))
+ l.append(elem)
+ return l
+
+
+def get_set_instace_attributes(e, create=False):
+ '''
+ Return instance attributes set nodes (create one if requested)
+ '''
+ l = [c for c in e.iterchildren("instance_attributes")]
+ if l:
+ return l
+ if create:
+ from . import idmgmt
+ elem = etree.SubElement(e, "instance_attributes", id="")
+ elem.set("id", "nodes-"+e.attrib["id"])
+ l.append(elem)
+ return l
+
+
+_checker = doctestcompare.LXMLOutputChecker()
+
+
+def xml_equals_unordered(a, b):
+ """
+ used by xml_equals to compare xml trees without ordering.
+ NOTE: resource_set children SHOULD be compared with ordering.
+ """
+ def fail(msg):
+ logger.debug("%s!=%s: %s", a.tag, b.tag, msg)
+ return False
+
+ def tagflat(x):
+ return isinstance(x.tag, str) and x.tag or x.text
+
+ def sortby(v):
+ if v.tag == 'primitive':
+ return v.tag
+ return tagflat(v) + ''.join(sorted(list(v.attrib.keys()) + list(v.attrib.values())))
+
+ def safe_strip(text):
+ return text is not None and text.strip() or ''
+
+ if a.tag != b.tag:
+ return fail("tags differ: %s != %s" % (a.tag, b.tag))
+ elif a.attrib != b.attrib:
+ return fail("attributes differ: %s != %s" % (a.attrib, b.attrib))
+ elif safe_strip(a.text) != safe_strip(b.text):
+ return fail("text differ %s != %s" % (repr(a.text), repr(b.text)))
+ elif safe_strip(a.tail) != safe_strip(b.tail):
+ return fail("tails differ: %s != %s" % (a.tail, b.tail))
+ elif len(a) != len(b):
+ return fail("number of children differ")
+ elif len(a) == 0:
+ return True
+
+ # order matters here, but in a strange way:
+ # all primitive tags should sort the same..
+ if a.tag == 'resource_set':
+ return all(xml_equals_unordered(a, b) for a, b in zip(a, b))
+ else:
+ sorted_children = list(zip(sorted(a, key=sortby), sorted(b, key=sortby)))
+ return all(xml_equals_unordered(a, b) for a, b in sorted_children)
+
+
+def xml_equals(n, m, show=False):
+ rc = xml_equals_unordered(n, m)
+ if not rc and show and config.core.debug:
+ # somewhat strange, but that's how this works
+ from doctest import Example
+ example = Example("etree.tostring(n)", xml_tostring(n))
+ got = xml_tostring(m)
+ print(_checker.output_difference(example, got, 0))
+ return rc
+
+
+def xml_tostring(*args, **kwargs):
+ """
+ Python 2/3 conversion utility:
+ etree.tostring returns a bytestring, but
+ we need actual Python strings.
+ """
+ return etree.tostring(*args, **kwargs).decode('utf-8')
+
+
+def merge_attributes(dnode, snode, tag):
+ rc = False
+ add_children = []
+ for sc in snode.iterchildren(tag):
+ dc = lookup_node(sc, dnode, ignore_id=True)
+ if dc is not None:
+ for a, v in list(sc.items()):
+ if a == "id":
+ continue
+ if v != dc.get(a):
+ dc.set(a, v)
+ rc = True
+ else:
+ add_children.append(sc)
+ rc = True
+ for c in add_children:
+ dnode.append(copy.deepcopy(c))
+ return rc
+
+
+def merge_nodes(dnode, snode):
+ '''
+ Import elements from snode into dnode.
+ If an element is an attribute set (constants.nvpairs_tags) or
+ "operations", then merge attributes in the children.
+ Otherwise, replace the whole element. (TBD)
+ '''
+ rc = False # any changes done?
+ if dnode is None or snode is None:
+ return rc
+ add_children = []
+ for sc in snode.iterchildren():
+ dc = lookup_node(sc, dnode, ignore_id=True)
+ if dc is None:
+ if sc.tag in constants.nvpairs_tags or sc.tag == "operations":
+ add_children.append(sc)
+ rc = True
+ elif dc.tag in constants.nvpairs_tags:
+ rc = merge_attributes(dc, sc, "nvpair") or rc
+ elif dc.tag == "operations":
+ rc = merge_attributes(dc, sc, "op") or rc
+ for c in add_children:
+ dnode.append(copy.deepcopy(c))
+ return rc
+
+
+def merge_tmpl_into_prim(prim_node, tmpl_node):
+ '''
+ Create a new primitive element which is a merge of a
+ rsc_template and a primitive which references it.
+ '''
+ dnode = etree.Element(prim_node.tag)
+ merge_nodes(dnode, tmpl_node)
+ merge_nodes(dnode, prim_node)
+ # the resulting node should inherit all of the primitive's attributes
+ for a, v in list(prim_node.items()):
+ dnode.set(a, v)
+ # but class/provider/type are coming from the template
+ # savannah#41410: stonith resources do not have the provider
+ # attribute
+ for a in ("class", "provider", "type"):
+ v = tmpl_node.get(a)
+ if v is not None:
+ dnode.set(a, v)
+ return dnode
+
+
+def check_id_ref(elem, id_ref):
+ target = elem.xpath('.//*[@id="%s"]' % (id_ref))
+ if len(target) == 0:
+ logger.error("Reference not found: %s", id_ref)
+ elif len(target) > 1:
+ logger.error("Ambiguous reference to %s", id_ref)
+
+
+def new(tag, **attributes):
+ """
+ <tag/>
+ """
+ return etree.Element(tag, **attributes)
+
+
+def child(parent, tag, **attributes):
+ """append new tag to parent.
+ Use append() in case parent is a list and not an element.
+ """
+ e = etree.Element(tag, **attributes)
+ parent.append(e)
+ return e
+
+
+def tostring(n):
+ return etree.tostring(n, pretty_print=True)
+
+
+def maybe_set(node, key, value):
+ if value:
+ node.set(key, value)
+ return node
+
+
+def nvpair(name, value):
+ """
+ <nvpair name="" value="" />
+ """
+ value = handle_role_for_ocf_1_1(value, name=name)
+ return new("nvpair", name=name, value=value)
+
+
+def nvpair_id(nvpairid, name, value):
+ """
+ <nvpair id="" name="" value="" />
+ """
+ if name is None:
+ name = nvpairid
+ return new("nvpair", id=nvpairid, name=name, value=value)
+
+
+def nvpair_ref(idref, name=None):
+ """
+ <nvpair id-ref=<idref> [name=<name>]/>
+ """
+ nvp = new("nvpair")
+ nvp.set('id-ref', idref)
+ if name is not None:
+ nvp.set('name', name)
+ return nvp
+
+
+def set_date_expression(expr, tag, values):
+ """
+ Fill in date_expression tag for date_spec/in_range operations
+ expr: <date_expression/>
+ values: [nvpair...]
+ """
+ if set(nvp.get('name') for nvp in values) == set(constants.in_range_attrs):
+ for nvp in values:
+ expr.set(nvp.get('name'), nvp.get('value'))
+ return expr
+ subtag = child(expr, tag)
+ for nvp in values:
+ if nvp.get('name') in constants.in_range_attrs:
+ expr.set(nvp.get('name'), nvp.get('value'))
+ else:
+ subtag.set(nvp.get('name'), nvp.get('value'))
+ return expr
+
+
+def attributes(typename, rules, values, xmlid=None, score=None):
+ """
+ Represents a set of name-value pairs, tagged with
+ a container typename and an optional xml id.
+ The container can also hold rule expressions, passed
+ in the rules parameter.
+
+ returns an xml object containing the data
+ example:
+ <instance_attributes id="foo">
+ <nvpair name="thing" value="yes"/>
+ </instance_attributes>
+ """
+ e = new(typename)
+ if xmlid:
+ e.set("id", xmlid)
+ if score:
+ e.set("score", score)
+ for rule in rules:
+ e.append(rule)
+ for nvp in values:
+ e.append(nvp)
+ return e
+
+
+class CrmMonXmlParser(object):
+ """
+ Class to parse xml output of crm_mon
+ """
+ def __init__(self, peer=None):
+ """
+ Initialize the parser.
+ When peer is set, parse the crm_mon output of that peer node.
+ """
+ self.peer = peer
+ self.xml_elem = self._load()
+
+ def _load(self):
+ """
+ Load xml output of crm_mon
+ """
+ _, output, _ = sh.cluster_shell().get_rc_stdout_stderr_without_input(self.peer, constants.CRM_MON_XML_OUTPUT)
+ return text2elem(output)
+
+ def is_node_online(self, node):
+ """
+ Check if a node is online
+ """
+ xpath = f'//node[@name="{node}" and @online="true"]'
+ return bool(self.xml_elem.xpath(xpath))
+
+ def get_node_list(self, attr=None):
+ """
+ Get a list of nodes based on the given attribute
+ """
+ attr_dict = {
+ 'standby': '[@standby="true"]',
+ 'online': '[@standby="false"]'
+ }
+ xpath_str = f'//node{attr_dict.get(attr, "")}'
+ return [e.get('name') for e in self.xml_elem.xpath(xpath_str)]
+
+ def is_resource_configured(self, ra_type):
+ """
+ Check if the RA is configured
+ """
+ xpath = f'//resource[@resource_agent="{ra_type}"]'
+ return bool(self.xml_elem.xpath(xpath))
+
+ def is_any_resource_running(self):
+ """
+ Check if any RA is running
+ """
+ xpath = '//resource[@active="true"]'
+ return bool(self.xml_elem.xpath(xpath))
+
+ def is_resource_started(self, ra):
+ """
+ Check if the RA is started (in all clone instances if configured as a clone)
+
+ @ra can be a resource id or a resource type
+ """
+ xpath = f'//resource[(@id="{ra}" or @resource_agent="{ra}") and @active="true" and @role="Started"]'
+ return bool(self.xml_elem.xpath(xpath))
+
+ def get_resource_id_list_via_type(self, ra_type):
+ """
+ Given configured ra type, get the ra id list
+ """
+ xpath = f'//resource[@resource_agent="{ra_type}"]'
+ return [elem.get('id') for elem in self.xml_elem.xpath(xpath)]
+# vim:ts=4:sw=4:et:
diff --git a/data-manifest b/data-manifest
new file mode 100644
index 0000000..ed87a2b
--- /dev/null
+++ b/data-manifest
@@ -0,0 +1,225 @@
+scripts/apache/main.yml
+scripts/check-uptime/fetch.py
+scripts/check-uptime/main.yml
+scripts/check-uptime/report.py
+scripts/clvm/main.yml
+scripts/clvm-vg/main.yml
+scripts/cryptctl/main.yml
+scripts/cryptctl/README.md
+scripts/database/main.yml
+scripts/db2-hadr/main.yml
+scripts/db2/main.yml
+scripts/drbd/main.yml
+scripts/exportfs/main.yml
+scripts/filesystem/main.yml
+scripts/gfs2-base/main.yml
+scripts/gfs2/main.yml
+scripts/haproxy/haproxy.cfg
+scripts/haproxy/main.yml
+scripts/health/collect.py
+scripts/health/hahealth.py
+scripts/health/main.yml
+scripts/health/report.py
+scripts/libvirt/main.yml
+scripts/lvm-drbd/main.yml
+scripts/lvm/main.yml
+scripts/mailto/main.yml
+scripts/nfsserver-lvm-drbd/main.yml
+scripts/nfsserver/main.yml
+scripts/nginx/main.yml
+scripts/ocfs2/main.yml
+scripts/oracle/main.yml
+scripts/raid1/main.yml
+scripts/raid-lvm/main.yml
+scripts/sap-as/main.yml
+scripts/sap-ci/main.yml
+scripts/sap-db/main.yml
+scripts/sapdb/main.yml
+scripts/sapinstance/main.yml
+scripts/sap-simple-stack/main.yml
+scripts/sap-simple-stack-plus/main.yml
+scripts/sbd-device/main.yml
+scripts/sbd/main.yml
+scripts/virtual-ip/main.yml
+scripts/vmware/main.yml
+templates/apache
+templates/clvm
+templates/filesystem
+templates/gfs2
+templates/gfs2-base
+templates/ocfs2
+templates/sbd
+templates/virtual-ip
+test/bugs-test.txt
+test/cibtests/001.exp.xml
+test/cibtests/001.input
+test/cibtests/002.exp.xml
+test/cibtests/002.input
+test/cibtests/003.exp.xml
+test/cibtests/003.input
+test/cibtests/004.exp.xml
+test/cibtests/004.input
+test/cib-tests.sh
+test/cibtests/shadow.base
+test/crm-interface
+test/defaults
+test/descriptions
+test/evaltest.sh
+test/features/bootstrap_bugs.feature
+test/features/bootstrap_init_join_remove.feature
+test/features/bootstrap_options.feature
+test/features/bootstrap_sbd_delay.feature
+test/features/bootstrap_sbd_normal.feature
+test/features/cluster_api.feature
+test/features/configure_bugs.feature
+test/features/constraints_bugs.feature
+test/features/coveragerc
+test/features/crm_report_bugs.feature
+test/features/crm_report_normal.feature
+test/features/environment.py
+test/features/geo_setup.feature
+test/features/healthcheck.feature
+test/features/ocfs2.feature
+test/features/qdevice_options.feature
+test/features/qdevice_setup_remove.feature
+test/features/qdevice_usercase.feature
+test/features/qdevice_validate.feature
+test/features/resource_failcount.feature
+test/features/resource_set.feature
+test/features/ssh_agent.feature
+test/features/steps/behave_agent.py
+test/features/steps/const.py
+test/features/steps/__init__.py
+test/features/steps/step_implementation.py
+test/features/steps/utils.py
+test/features/user_access.feature
+test/history-test.tar.bz2
+test/list-undocumented-commands.py
+test/profile-history.sh
+test/README.regression
+test/regression.sh
+test/run-functional-tests
+test/testcases/acl
+test/testcases/acl.excl
+test/testcases/acl.exp
+test/testcases/basicset
+test/testcases/bugs
+test/testcases/bugs.exp
+test/testcases/bundle
+test/testcases/bundle.exp
+test/testcases/commit
+test/testcases/commit.exp
+test/testcases/common.excl
+test/testcases/common.filter
+test/testcases/confbasic
+test/testcases/confbasic.exp
+test/testcases/confbasic-xml
+test/testcases/confbasic-xml.exp
+test/testcases/confbasic-xml.filter
+test/testcases/delete
+test/testcases/delete.exp
+test/testcases/edit
+test/testcases/edit.excl
+test/testcases/edit.exp
+test/testcases/file
+test/testcases/file.exp
+test/testcases/history
+test/testcases/history.excl
+test/testcases/history.exp
+test/testcases/history.post
+test/testcases/history.pre
+test/testcases/newfeatures
+test/testcases/newfeatures.exp
+test/testcases/node
+test/testcases/node.exp
+test/testcases/options
+test/testcases/options.exp
+test/testcases/ra
+test/testcases/ra.exp
+test/testcases/ra.filter
+test/testcases/resource
+test/testcases/resource.exp
+test/testcases/rset
+test/testcases/rset.exp
+test/testcases/rset-xml
+test/testcases/rset-xml.exp
+test/testcases/scripts
+test/testcases/scripts.exp
+test/testcases/scripts.filter
+test/testcases/shadow
+test/testcases/shadow.exp
+test/testcases/xmlonly.sh
+test/unittests/bug-862577_corosync.conf
+test/unittests/corosync.conf.1
+test/unittests/corosync.conf.2
+test/unittests/corosync.conf.3
+test/unittests/__init__.py
+test/unittests/pacemaker.log
+test/unittests/pacemaker.log.2
+test/unittests/pacemaker_unicode.log
+test/unittests/schemas/acls-1.1.rng
+test/unittests/schemas/acls-1.2.rng
+test/unittests/schemas/constraints-1.0.rng
+test/unittests/schemas/constraints-1.1.rng
+test/unittests/schemas/constraints-1.2.rng
+test/unittests/schemas/fencing.rng
+test/unittests/schemas/nvset.rng
+test/unittests/schemas/pacemaker-1.0.rng
+test/unittests/schemas/pacemaker-1.1.rng
+test/unittests/schemas/pacemaker-1.2.rng
+test/unittests/schemas/resources-1.0.rng
+test/unittests/schemas/resources-1.1.rng
+test/unittests/schemas/resources-1.2.rng
+test/unittests/schemas/rule.rng
+test/unittests/schemas/score.rng
+test/unittests/schemas/versions.rng
+test/unittests/scripts/inc1/main.yml
+test/unittests/scripts/inc2/main.yml
+test/unittests/scripts/legacy/main.yml
+test/unittests/scripts/templates/apache.xml
+test/unittests/scripts/templates/virtual-ip.xml
+test/unittests/scripts/unified/main.yml
+test/unittests/scripts/v2/main.yml
+test/unittests/scripts/vipinc/main.yml
+test/unittests/scripts/vip/main.yml
+test/unittests/scripts/workflows/10-webserver.xml
+test/unittests/test_bootstrap.py
+test/unittests/test_bugs.py
+test/unittests/test_cib.py
+test/unittests/test_cliformat.py
+test/unittests/test.conf
+test/unittests/test_corosync.py
+test/unittests/test_crashtest_check.py
+test/unittests/test_crashtest_main.py
+test/unittests/test_crashtest_task.py
+test/unittests/test_crashtest_utils.py
+test/unittests/test_gv.py
+test/unittests/test_handles.py
+test/unittests/test_lock.py
+test/unittests/test_objset.py
+test/unittests/test_ocfs2.py
+test/unittests/test_parallax.py
+test/unittests/test_parse.py
+test/unittests/test_prun.py
+test/unittests/test_qdevice.py
+test/unittests/test_ratrace.py
+test/unittests/test_report_collect.py
+test/unittests/test_report_core.py
+test/unittests/test_report_utils.py
+test/unittests/test_sbd.py
+test/unittests/test_scripts.py
+test/unittests/test_service_manager.py
+test/unittests/test_sh.py
+test/unittests/test_time.py
+test/unittests/test_ui_cluster.py
+test/unittests/test_upgradeuitl.py
+test/unittests/test_utils.py
+test/unittests/test_watchdog.py
+test/unittests/test_xmlutil.py
+test/update-expected-output.sh
+utils/crm_clean.py
+utils/crm_init.py
+utils/crm_pkg.py
+utils/crm_rpmcheck.py
+utils/crm_script.py
+version
diff --git a/doc/bootstrap-howto.md b/doc/bootstrap-howto.md
new file mode 100644
index 0000000..dac3eec
--- /dev/null
+++ b/doc/bootstrap-howto.md
@@ -0,0 +1,206 @@
+# How to use the Bootstrap commands
+
+## Introduction
+
+`crmsh` includes a set of cluster bootstrapping commands for setting
+up an initial cluster, adding and removing cluster nodes, and setting
+up geo clusters including arbitrators.
+
+This document is a simplified guide to using these commands. There are
+a lot of optional features that won't be fully covered by this guide,
+but it should serve as a basic introduction to the bootstrap commands.
+
+*Note:* These commands currently work correctly only on SUSE Linux
+ Enterprise or openSUSE, and only if the `csync2` command is installed
+ on all cluster nodes. For users of other distributions, please see
+ the documentation included with your operating system.
+
+## Commands
+
+First, here is the list of commands and a brief description of each
+one. Each command is available in the `crm cluster` namespace, so to
+run the `init` command, either call `crm cluster init` from the shell
+command line or navigate to the `cluster` level in the interactive
+`crm` shell and call the `init` command directly:
+
+* `init` - Initialize a new cluster from scratch.
+* `add` - Add a node to the current cluster.
+* `join` - Add the current node to a cluster.
+* `remove` - Remove a node from the cluster.
+* `geo-init` - Create a new geo cluster with the current cluster as its first member.
+* `geo-init-arbitrator` - Make the current node a geo cluster arbitrator.
+* `geo-join` - Join the current cluster to an existing geo cluster.
+
+## Initializing a basic cluster
+
+For the full documentation of the `init` command, see
+`crm help cluster init` in the interactive shell, or refer to the
+online documentation at [crmsh.github.io](https://crmsh.github.io/).
+
+### Using csync2 to synchronize configuration files
+
+By default, the bootstrap commands make some assumptions about the
+configuration to apply in order to simplify the process. One such
+assumption is that the `csync2` command is installed and available for
+use to synchronize the cluster configuration files across the
+cluster. When initializing the basic cluster, `init` will configure
+SSH access to all cluster nodes, open the necessary ports in the
+firewall, and configure `csync2` so that configuration files can be
+mirrored securely across the cluster.
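+
+If you want to verify that the nodes are in sync, or trigger a
+synchronization manually, something like the following should work on
+any cluster node (assuming the `csync2` configuration written by
+`init`):
+
+```
+csync2 -xv
+```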
+
+### Configuring SBD
+
+`SBD` is the storage-based fencing mechanism recommended for use with
+Pacemaker. Using a storage-based fencing method simplifies
+configuration, as access to external hardware such as a lights-out
+device or UPS doesn't have to be configured, and nodes can self-fence
+if they detect reduced connectivity and loss of quorum.
+
+`init` will optionally configure SBD for you. To do this, pass the
+device to use as the SBD shared storage device using the
+`--sbd-device=<device>` argument. It is also possible to configure
+both SBD and a shared storage device formatted with the OCFS2 file
+system, using the `--partition-device=<device>` argument. To use this
+option, enable the `ocfs2` template using `-t ocfs2`.
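+
+As an illustration, a cluster with SBD could be initialized with a
+command line like the following (the device path is a placeholder for
+your shared storage device):
+
+```
+init --name sbd-cluster --nodes "alice bob carol" --sbd-device /dev/disk/by-id/scsi-EXAMPLE
+```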
+
+### Basic Example
+
+This command line, when run on the uninitialized node `alice`, will
+configure and start a basic cluster on the three nodes `alice`, `bob`
+and `carol`.
+
+```
+init --name basic-cluster --nodes "alice bob carol"
+```
+
+The `--name` argument is optional for regular clusters, but required
+when configuring a geo cluster.
+
+### Non-interactive configuration
+
+To run the initialization steps non-interactively, pass the `-y` or
+`--yes` flag to `init`. The default option will be chosen wherever the
+command would otherwise have prompted for user input. If no default
+option is available and user input is required, the command will
+abort.
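+
+For example, a fully non-interactive version of the basic cluster
+setup shown above would be:
+
+```
+init -y --name basic-cluster --nodes "alice bob carol"
+```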
+
+### Configuring shared storage
+
+To configure shared storage using the `init` command, make sure that
+you have a storage device accessible from all cluster nodes. This can
+be an iSCSI device provided by a SAN, or a shared storage volume as
+provided by your virtualization platform. To partition this device
+into two volumes, one for use by SBD and one as a shared OCFS2 device,
+use a command line like the following:
+
+```
+init --name storage-cluster --nodes "alice bob carol" -t ocfs2 -p /dev/shared
+```
+
+### Configuring an administration IP
+
+To immediately configure a virtual IP address resource in the cluster,
+use the `-A` argument: `init -A 1.2.3.4`.
+
+The common use case for this virtual IP is to have a single point of
+entry to [Hawk](https://hawk-ui.github.io), the cluster web
+interface. It is also useful as a first example of a cluster
+resource.
+
+## Adding a new node to a cluster
+
+There are two commands for adding a node to a cluster. When running
+the command from one of the existing cluster nodes, use the `add`
+form. For example, if there is an existing cluster consisting of the
+nodes `alice` and `bob`, the following command will add `carol` as the
+third node in the cluster when run from `alice` or `bob`:
+
+```
+alice# crm cluster add carol
+```
+
+It is also possible to add `carol` to the cluster from `carol`
+directly, using the `join` form:
+
+```
+carol# crm cluster join -c alice
+```
+
+Note that `join` takes an argument `-c <node>`.
+
+## Removing a node from a cluster
+
+To remove a node from the cluster, run
+
+```
+crm cluster remove <node>
+```
+
+To remove the last node in a cluster (thereby destroying the cluster),
+it is required to pass the `--force` flag to `crm`:
+
+```
+crm --force cluster remove $(hostname)
+```
+
+## Creating a geo cluster
+
+Once you have a cluster up and running and have given it a
+sensible name using `--name` (or by editing `corosync.conf` on all
+cluster nodes and restarting the cluster), you can turn that cluster
+into the first member in a geo cluster. Geo clusters are managed by
+the `booth` daemon, so to use these commands, `booth` needs to be
+installed on all cluster nodes.
+
+The `geo-init` command takes as its arguments a complete description
+of the geo cluster. This is because `booth` does not share its
+configuration across the cluster; instead, each cluster node in each
+cluster needs to have a copy of the `booth` configuration.
+
+As an example, we will configure a geo cluster consisting of five
+nodes in total: The nodes `alice` and `bob` are members of the
+`amsterdam` cluster. `carol` and `dave` are members of the `london`
+cluster. Finally, `eve` is the arbitrator node located at a third
+site. The `amsterdam` cluster is identified by the virtual IP
+`192.168.100.8`, while the `london` cluster is identified by the
+virtual IP `192.168.100.9`.
+
+The `geo-init` command will configure these virtual IPs in each
+cluster, so there is no need to configure them beforehand.
+
+This geo cluster will share a single ticket, called `mcguffin`.
+
+To create this configuration, run
+
+```
+crm cluster geo-init \
+ --arbitrator eve \
+ --tickets mcguffin \
+ --clusters "amsterdam=192.168.100.8 london=192.168.100.9"
+```
+
+This will configure both the required cluster resources and the booth
+daemon itself in the initial cluster.
+
+## Adding an arbitrator to a geo cluster
+
+This example uses the same basic setup as the `geo-init` example.
+
+To configure the arbitrator `eve`, run the `geo-init-arbitrator`
+command on `eve`, passing the cluster IP of the existing `amsterdam`
+geo cluster member:
+
+```
+crm cluster geo-init-arbitrator \
+ --cluster-node 192.168.100.8
+```
+
+## Adding a second cluster to a geo cluster
+
+To add the `london` cluster to the existing geo cluster described in
+the previous two sections, run the `geo-join` command from one of the
+nodes in the `london` cluster:
+
+```
+crm cluster geo-join --cluster-node 192.168.100.8
+```
diff --git a/doc/bootstrap-todo.md b/doc/bootstrap-todo.md
new file mode 100644
index 0000000..3db7136
--- /dev/null
+++ b/doc/bootstrap-todo.md
@@ -0,0 +1,56 @@
+# Bootstrap TODO
+
+(inherited from the bootstrap project)
+
+## Unclear Responsibility
+
+These may be in purview of ha-cluster-bootstrap, or may be in appliance image:
+
+* install debuginfo packages
+* enable coredumps
+
+
+## General / Random
+
+* csync2_remote assumes there's only one group in csync2.cfg, or, more to
+ the point, will only add new hosts to the first group.
+* Likewise, ssh_merge grabs all hosts regardless of what group they're in
+ (although this is probably fine)
+* get rid of curses junk in log file (fix ENV term)
+* Multi-device SBD (use multiple -s args)
+* Start new node on standby:
+
+  ```
+  # crm configure node node-1 attributes standby="on"
+  # crm node clearstate node-1 (requires confirmation)
+  # (now start the new node -- unbelievably, it works!)
+  # crm node online node-1
+  ```
+* don't error to log if log not started
+* is "partx -a <device>" any sort of sane replacement for partprobe?
+* Use ssh-copy-id instead of manual fiddling with authorized_keys?
+
+
+## STONITH Config
+
+* See https://bugzilla.novell.com/show_bug.cgi?id=722405 for stonith timeout suggestions
+
+
+## Template Mode
+
+Generally specific to OCFS2 template ATM, as that's the only one extant.
+
+* Very long path to partition (/dev/disk/by-path/... for iSCSI) means we
+ can't determine paths to new partitions, thanks to bnc#722959. Unclear
+ if this will be fixed for SP2.
+* /dev/disk/by-id/dm-name paths are unreliable (at least for determining
+ partitions after carving the device up).
+* Probably need to prompt user for new partitions after carving, should
+ they not be found (FFS).
+* ocfs2 template is not the same as Hawk's. Consider enhancing
+ ha-cluster-bootstrap so it uses Hawk's templates directly rather than using
+ its own.
+* Ensure required RPMs are installed when running template (they're just
+ Recommends in the spec)
+* Specifying sbd without ocfs2 partition may be incompatible with ocfs2
+ template (need to test)
+* block device size " blockdev --getsz" etc. (when making OCFS2 partition
+ with "-T vmstore")
+
diff --git a/doc/crm.8.adoc b/doc/crm.8.adoc
new file mode 100644
index 0000000..da9fb38
--- /dev/null
+++ b/doc/crm.8.adoc
@@ -0,0 +1,5102 @@
+:man source: crm
+:man version: 4.6.0
+:man manual: crmsh documentation
+
+crm(8)
+======
+
+NAME
+----
+crm - Pacemaker command line interface for configuration and management
+
+
+SYNOPSIS
+--------
+*crm* [OPTIONS] [SUBCOMMAND ARGS...]
+
+
+[[topics_Description,Program description]]
+DESCRIPTION
+-----------
+The `crm` shell is a command-line based cluster configuration and
+management tool. Its goal is to assist as much as possible with the
+configuration and maintenance of Pacemaker-based High Availability
+clusters.
+
+For more information on Pacemaker itself, see http://clusterlabs.org/.
+
+`crm` works both as a command-line tool to be called directly from the
+system shell, and as an interactive shell with extensive tab
+completion and help.
+
+The primary focus of the `crm` shell is to provide a simplified and
+consistent interface to Pacemaker, but it also provides tools for
+managing the creation and configuration of High Availability clusters
+from scratch. To learn more about this aspect of `crm`, see the
+`cluster` section below.
+
+The `crm` shell can be used to manage every aspect of configuring and
+maintaining a cluster. It provides a simplified line-based syntax on
+top of the XML configuration format used by Pacemaker, commands for
+starting and stopping resources, tools for exploring the history of a
+cluster including log scraping and a set of cluster scripts useful for
+automating the setup and installation of services on the cluster
+nodes.
+
+The `crm` shell is line oriented: every command must start and finish
+on the same line. It is possible to use a continuation character (+\+)
+to write one command in two or more lines. The continuation character
+is commonly used when displaying configurations.
+
+[[topics_CommandLine,Command line options]]
+OPTIONS
+-------
+*-f, --file*='FILE'::
+ Load commands from the given file. If a dash +-+ is used in place
+ of a file name, `crm` will read commands from the shell standard
+ input (`stdin`).
+
+*-c, --cib*='CIB'::
+ Start the session using the given shadow CIB file.
+ Equivalent to +cib use <CIB>+.
+
+*-D, --display*='OUTPUT_TYPE'::
+ Choose one of the output options: +plain+, +color-always+, +color+,
+ or +uppercase+. The default is +color+ if the terminal emulation
+ supports colors. Otherwise, +plain+ is used.
+
+*-F, --force*::
+ Make `crm` proceed with applying changes where it would normally
+ ask the user to confirm before proceeding. This option is mainly
+ useful in scripts, and should be used with care.
+
+*-w, --wait*::
+ Make `crm` wait for the cluster transition to finish (for the
+ changes to take effect) after each processed line.
+
+*-H, --history*='DIR|FILE|SESSION'::
+ A directory or file containing a cluster report to load
+ into the `history` commands, or the name of a previously
+ saved history session.
+
+*-h, --help*::
+ Print help page.
+
+*--version*::
+ Print crmsh version and build information (Mercurial changeset
+ hash).
+
+*-d, --debug*::
+ Print verbose debugging information.
+
+*-R, --regression-tests*::
+ Enables extra verbose trace logging used by the regression
+ tests. Logs all external calls made by crmsh.
+
+*--scriptdir*='DIR'::
+ Extra directory where crm looks for cluster scripts, or a list of
+ directories separated by semi-colons (e.g. +/dir1;/dir2;etc.+).
+
+*-o, --opt*='OPTION=VALUE'::
+ Set crmsh option temporarily. If the options are saved using
+ +options save+ then the value passed here will also be saved.
+ Multiple options can be set by using +-o+ multiple times.
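+
+For example, to enable debug output for a single invocation (using
+the +core.debug+ option for illustration):
+
+...............
+# crm -o core.debug=true status
+...............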
+
+[[topics_Introduction,Introduction]]
+== Introduction
+
+This section of the user guide covers general topics about the user
+interface and describes some of the features of `crmsh` in detail.
+
+[[topics_Introduction_Interface,User interface]]
+=== User interface
+
+The main purpose of `crmsh` is to provide a simple yet powerful
+interface to the cluster stack. There are two main modes of operation
+with the user interface of `crmsh`:
+
+* Command line (single-shot) use - Use `crm` as a regular UNIX command
+ from your usual shell. `crm` has full bash completion built in, so
+ using it in this manner should be as comfortable and familiar as
+ using any other command-line tool.
+
+* Interactive mode - By calling `crm` without arguments, or by calling
+ it with only a sublevel as argument, `crm` enters the interactive
+ mode. In this mode, it acts as its own command shell, which
+ remembers which sublevel you are currently in and allows for rapid
+ and convenient execution of multiple commands within the same
+ sublevel. This mode also has full tab completion, as well as
+ built-in interactive help and syntax highlighting.
+
+Here are a few examples of using `crm` both as a command-line tool and
+as an interactive shell:
+
+.Command line (one-shot) use:
+........
+# crm resource stop www_app
+........
+
+.Interactive use:
+........
+# crm
+crm(live)# resource
+crm(live)resource# unmanage tetris_1
+crm(live)resource# up
+crm(live)# node standby node4
+........
+
+.Cluster configuration:
+........
+# crm configure<<EOF
+ #
+ # resources
+ #
+ primitive disk0 iscsi \
+ params portal=192.168.2.108:3260 target=iqn.2008-07.com.suse:disk0
+ primitive fs0 Filesystem \
+ params device=/dev/disk/by-label/disk0 directory=/disk0 fstype=ext3
+ primitive internal_ip IPaddr params ip=192.168.1.101
+ primitive apache apache \
+ params configfile=/disk0/etc/apache2/site0.conf
+ primitive apcfence stonith:apcsmart \
+ params ttydev=/dev/ttyS0 hostlist="node1 node2" \
+ op start timeout=60s
+ primitive pingd ocf:pacemaker:ping \
+ params name=pingd dampen=5s multiplier=100 host_list="r1 r2"
+ #
+ # monitor apache and the UPS
+ #
+ monitor apache 60s:30s
+ monitor apcfence 120m:60s
+ #
+ # cluster layout
+ #
+ group internal_www \
+ disk0 fs0 internal_ip apache
+ clone fence apcfence \
+ meta globally-unique=false clone-max=2 clone-node-max=1
+ clone conn pingd \
+ meta globally-unique=false clone-max=2 clone-node-max=1
+ location node_pref internal_www \
+ rule 50: #uname eq node1 \
+ rule pingd: defined pingd
+ #
+ # cluster properties
+ #
+ property stonith-enabled=true
+ commit
+EOF
+........
+
+The `crm` interface is hierarchical, with commands organized into
+separate levels by functionality. To list the available levels and
+commands, either execute +help <level>+, or, if at the top level of
+the shell, simply typing `help` will provide an overview of all
+available levels and commands.
+
+The +(live)+ string in the `crm` prompt signifies that the current CIB
+in use is the cluster live configuration. It is also possible to
+work with so-called <<topics_Features_Shadows,shadow CIBs>>. These are separate, inactive
+configurations stored in files, that can be applied and thereby
+replace the live configuration at any time.
+
+[[topics_Introduction_Completion,Tab completion]]
+=== Tab completion
+
+The `crm` shell makes extensive use of tab completion. The completion
+is both static (i.e. for `crm` commands) and dynamic. The latter
+takes into account the current status of the cluster or
+information from installed resource agents. Sometimes, completion
+may also be used to get short help on resource parameters. Here
+are a few examples:
+
+...............
+crm(live)resource# <TAB><TAB>
+ban demote maintenance param scores trace
+cd failcount manage promote secret unmanage
+cleanup help meta quit start untrace
+clear locate move refresh status up
+constraints ls operations restart stop utilization
+
+crm(live)configure# primitive fence-1 <TAB><TAB>
+lsb: ocf: service: stonith: systemd:
+
+crm(live)configure# primitive fence-1 stonith:<TAB><TAB>
+apcmaster external/ippower9258 fence_legacy
+apcmastersnmp external/kdumpcheck ibmhmc
+apcsmart external/libvirt ipmilan
+
+crm(live)configure# primitive fence-1 stonith:ipmilan params <TAB><TAB>
+auth= hostname= ipaddr= login= password= port= priv=
+
+crm(live)configure# primitive fence-1 stonith:ipmilan params auth=<TAB><TAB>
+auth* (string)
+ The authorization type of the IPMI session ("none", "straight", "md2", or "md5")
+...............
+
+`crmsh` also comes with bash completion usable directly from the
+system shell. This should be installed automatically with the command
+itself.
+
+[[topics_Introduction_Shorthand,Shorthand syntax]]
+=== Shorthand syntax
+
+When using the `crm` shell to manage clusters, you will end up typing
+a lot of commands many times over. Clear command names like
++configure+ help in understanding and learning to use the cluster
+shell, but are easy to misspell and tedious to type repeatedly. The
+interactive mode and tab completion both help with this, but the `crm`
+shell also has the ability to understand a variety of shorthand
+aliases for all of the commands.
+
+For example, instead of typing `crm status`, you can type `crm st` or
+`crm stat`. Instead of `crm configure` you can type `crm cfg` or even
+`crm cf`. `crm resource` can be shortened as `crm rsc`, and so on.
+
+The exact list of accepted aliases is too long to print in full, but
+experimentation and typos should help in discovering more of them.
+
+[[topics_Features,Features]]
+== Features
+
+The feature set of crmsh covers a wide range of functionality, and
+understanding how and when to use the various features of the shell
+can be difficult. This section of the guide describes some of the
+features and use cases of `crmsh` in more depth. The intention is to
+provide a deeper understanding of these features, but also to serve as
+a guide to using them.
+
+[[topics_Features_Shadows,Shadow CIB usage]]
+=== Shadow CIB usage
+
+Shadow CIBs are normal cluster configurations stored in files.
+They may be manipulated in much the same way as the _live_ CIB, with
+the key difference that changes to a shadow CIB have no effect on the
+actual cluster resources. An administrator may choose to apply any of
+them to the cluster, thus replacing the running configuration with the
+one found in the shadow CIB.
+
+The `crm` prompt always contains the name of the configuration which
+is currently in use, or the string _live_ if using the live cluster
+configuration.
+
+When editing the configuration in the `configure` level, no changes
+are actually applied until the `commit` command is executed. It is
+possible to start editing a configuration as usual, but instead of
+committing the changes to the active CIB, save them to a shadow CIB.
+
+The following example `configure` session demonstrates how this can be
+done:
+...............
+crm(live)configure# cib new test-2
+INFO: test-2 shadow CIB created
+crm(test-2)configure# commit
+...............
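+
+To later replace the live configuration with the contents of the
+shadow CIB, the +cib commit+ command can be used (a sketch):
+...............
+crm(test-2)configure# cd ..
+crm(test-2)# cib commit test-2
+...............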
+
+[[topics_Features_Checks,Configuration semantic checks]]
+=== Configuration semantic checks
+
+Resource definitions may be checked against the meta-data
+provided with the resource agents. These checks are currently
+carried out:
+
+- whether required parameters are set
+- whether the defined parameters exist in the meta-data
+- whether operation timeout values are sufficient
+
+The parameter checks are obvious and need no further explanation.
+Failures in these checks are treated as configuration errors.
+
+The timeouts for operations should be at least as long as those
+recommended in the meta-data. Too short timeout values are a
+common mistake in cluster configurations and, even worse, they
+often slip through if cluster testing was not thorough. Though
+operation timeout issues are treated as warnings, make sure that
+the timeouts are usable in your environment. Note also that the
+values given are just an _advisory minimum_: your resources may
+require longer timeouts.
+
+Users may tune the frequency of checks and the treatment of errors
+via the <<cmdhelp_options_check-frequency,`check-frequency`>> and
+<<cmdhelp_options_check-mode,`check-mode`>> preferences.
+
+Note that if the +check-frequency+ is set to +always+ and the
++check-mode+ to +strict+, errors are not tolerated and such
+configuration cannot be saved.
+
+[[topics_Features_Templates,Configuration templates]]
+=== Configuration templates
+
+.Deprecation note
+****************************
+Configuration templates have been deprecated in favor of the more
+capable `cluster scripts`. To learn how to use cluster scripts, see
+the dedicated documentation on the `crmsh` website at
+http://crmsh.github.io/, or in the <<cmdhelp_script,Script section>>.
+****************************
+
+Configuration templates are ready made configurations created by
+cluster experts. They are designed in such a way so that users
+may generate valid cluster configurations with minimum effort.
+If you are new to Pacemaker, templates may be the best way to
+start.
+
+We will show here how to create a simple yet functional Apache
+configuration:
+...............
+# crm configure
+crm(live)configure# template
+crm(live)configure template# list templates
+apache filesystem virtual-ip
+crm(live)configure template# new web <TAB><TAB>
+apache filesystem virtual-ip
+crm(live)configure template# new web apache
+INFO: pulling in template apache
+INFO: pulling in template virtual-ip
+crm(live)configure template# list
+web2-d web2 vip2 web3 vip web
+...............
+
+We enter the `template` level from `configure`. Use the `list`
+command to show templates available on the system. The `new`
+command creates a configuration from the +apache+ template. You
+can use tab completion to pick templates. Note that the apache
+template depends on a virtual IP address which is automatically
+pulled along. The `list` command shows the just created +web+
+configuration, among other configurations (I hope that you,
+unlike me, will use more sensible and descriptive names).
+
+The `show` command, which displays the resulting configuration,
+may be used to get an idea about the minimum required changes
+which have to be done. All +ERROR+ messages show the line numbers
+in which the respective parameters are to be defined:
+...............
+crm(live)configure template# show
+ERROR: 23: required parameter ip not set
+ERROR: 61: required parameter id not set
+ERROR: 65: required parameter configfile not set
+crm(live)configure template# edit
+...............
+
+The `edit` command invokes the preferred text editor with the
++web+ configuration. At the top of the file, the user is advised
+how to make changes. A good template should only require the user
+to specify parameter values. For example, the +web+ configuration
+we created above has the following required and optional
+parameters (all parameter lines start with +%%+):
+...............
+$ grep -n ^%% ~/.crmconf/web
+23:%% ip
+31:%% netmask
+35:%% lvs_support
+61:%% id
+65:%% configfile
+71:%% options
+76:%% envfiles
+...............
+
+These lines are the only ones that should be modified. Simply
+append the parameter value at the end of the line. For instance,
+after editing this template, the result could look like this (we
+used tabs instead of spaces to make the values stand out):
+...............
+$ grep -n ^%% ~/.crmconf/web
+23:%% ip 192.168.1.101
+31:%% netmask
+35:%% lvs_support
+61:%% id websvc
+65:%% configfile /etc/apache2/httpd.conf
+71:%% options
+76:%% envfiles
+...............
+
+As you can see, the parameter line format is very simple:
+...............
+%% <name> <value>
+...............
+
+After editing the file, use `show` again to display the
+configuration:
+...............
+crm(live)configure template# show
+primitive virtual-ip IPaddr \
+ params ip=192.168.1.101
+primitive apache apache \
+ params configfile="/etc/apache2/httpd.conf"
+monitor apache 120s:60s
+group websvc \
+ apache virtual-ip
+...............
+
+The target resource of the apache template is a group which we
+named +websvc+ in this sample session.
+
+This configuration looks exactly like one you could type at the
+`configure` level. The point of templates is to save you some
+typing. It is important, however, to understand the configuration
+produced.
+
+Finally, the configuration may be applied to the current
+crm configuration (note how the configuration changed slightly,
+though it is still equivalent, after being digested at the
+`configure` level):
+...............
+crm(live)configure template# apply
+crm(live)configure template# cd ..
+crm(live)configure# show
+node xen-b
+node xen-c
+primitive apache apache \
+ params configfile="/etc/apache2/httpd.conf" \
+ op monitor interval=120s timeout=60s
+primitive virtual-ip IPaddr \
+ params ip=192.168.1.101
+group websvc apache virtual-ip
+...............
+
+Note that this still does not commit the configuration to the CIB
+which is used in the shell, either the running one (+live+) or
+some shadow CIB. For that you still need to execute the `commit`
+command.
+
+To complete our example, we should also define the preferred node
+to run the service:
+
+...............
+crm(live)configure# location websvc-pref websvc 100: xen-b
+...............
+
+If you are not happy with some resource names which are provided
+by default, you can rename them now:
+
+...............
+crm(live)configure# rename virtual-ip intranet-ip
+crm(live)configure# show
+node xen-b
+node xen-c
+primitive apache apache \
+ params configfile="/etc/apache2/httpd.conf" \
+ op monitor interval=120s timeout=60s
+primitive intranet-ip IPaddr \
+ params ip=192.168.1.101
+group websvc apache intranet-ip
+location websvc-pref websvc 100: xen-b
+...............
+
+To summarize, working with templates typically consists of the
+following steps:
+
+- `new`: create a new configuration from templates
+- `edit`: define parameters, at least the required ones
+- `show`: see if the configuration is valid
+- `apply`: apply the configuration to the `configure` level
+
+[[topics_Features_Testing,Resource testing]]
+=== Resource testing
+
+The amount of detail in a cluster makes all configurations prone
+to errors. By far the largest number of issues in a cluster is
+due to bad resource configuration. The shell can help quickly
+diagnose such problems, and considerably reduce your keyboard
+wear.
+
+Let's say that we entered the following configuration:
+...............
+node xen-b
+node xen-c
+node xen-d
+primitive fencer stonith:external/libvirt \
+ params hypervisor_uri="qemu+tcp://10.2.13.1/system" \
+ hostlist="xen-b xen-c xen-d" \
+ op monitor interval=2h
+primitive svc Xinetd \
+ params service=systat \
+ op monitor interval=30s
+primitive intranet-ip IPaddr2 \
+ params ip=10.2.13.100 \
+ op monitor interval=30s
+primitive apache apache \
+ params configfile="/etc/apache2/httpd.conf" \
+ op monitor interval=120s timeout=60s
+group websvc apache intranet-ip
+location websvc-pref websvc 100: xen-b
+...............
+
+Before typing `commit` to submit the configuration to the cib we
+can make sure that all resources are usable on all nodes:
+...............
+crm(live)configure# rsctest websvc svc fencer
+...............
+
+It is important that resources being tested are not running on
+any nodes. Otherwise, the `rsctest` command will refuse to do
+anything. Of course, if the current configuration resides in a
+shadow CIB, then a `commit` is irrelevant; the point is that the
+resources are not running on any node.
+
+.Note on stopping all resources
+****************************
+Alternatively to not committing a configuration, it is also
+possible to tell Pacemaker not to start any resources:
+
+...............
+crm(live)configure# property stop-all-resources=yes
+...............
+Almost none, that is: resources of class stonith are still started.
+The shell is, however, not as strict when it comes to stonith resources.
+****************************
+
+The order of resources is significant insofar as a resource depends
+on all resources to its left. In most configurations, it's
+probably practical to test resources in several runs, based on
+their dependencies.
+
+Apart from groups, `crm` does not interpret constraints and
+therefore knows nothing about resource dependencies. It also
+doesn't know if a resource can run on a node at all in case of an
+asymmetric cluster. It is up to the user to specify a list of
+eligible nodes if a resource is not meant to run on every node.
+
+[[topics_Features_Security,Access Control Lists (ACL)]]
+=== Access Control Lists (ACL)
+
+.Note on ACLs in Pacemaker 1.1.12
+****************************
+The support for ACLs has been revised in Pacemaker version 1.1.12 and
+up. Depending on which version you are using, the information in this
+section may no longer be accurate. Look for the `acl_target`
+configuration element for more details on the new syntax.
+****************************
+
+By default, the users from the +haclient+ group have full access
+to the cluster (or, more precisely, to the CIB). Access control
+lists allow for finer access control to the cluster.
+
+Access control lists consist of an ordered set of access rules.
+Each rule allows read or write access or denies access
+completely. Rules are typically combined to produce a specific
+role. Then, users may be assigned a role.
+
+For instance, this is a role which defines a set of rules
+allowing management of a single resource:
+
+...............
+role bigdb_admin \
+ write meta:bigdb:target-role \
+ write meta:bigdb:is-managed \
+ write location:bigdb \
+ read ref:bigdb
+...............
+
+The first two rules allow modifying the +target-role+ and
++is-managed+ meta attributes which effectively enables users in
+this role to stop/start and manage/unmanage the resource. The
+constraints write access rule allows moving the resource around.
+Finally, the user is granted read access to the resource
+definition.
+
+For proper operation of all Pacemaker programs, it is advisable
+to add the following role to all users:
+
+...............
+role read_all \
+ read cib
+...............
+
+For finer grained read access try with the rules listed in the
+following role:
+
+...............
+role basic_read \
+ read node attribute:uname \
+ read node attribute:type \
+ read property \
+ read status
+...............
+
+It is however possible that some Pacemaker programs (e.g.
+`ptest`) may not function correctly if the whole CIB is not
+readable.
+
+Some of the ACL rules in the examples above are expanded by the
+shell to XPath specifications. For instance,
++meta:bigdb:target-role+ expands to:
+
+........
+//primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']
+........
+
+You can see the expansion by showing XML:
+
+...............
+crm(live) configure# show xml bigdb_admin
+...
+<acls>
+ <acl_role id="bigdb_admin">
+ <write id="bigdb_admin-write"
+ xpath="//primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']"/>
+...............
+
+Many different XPath expressions can have equal meaning. For
+instance, the following two are equal, but only the first one is
+going to be recognized as shortcut:
+
+...............
+//primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']
+//resources/primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']
+...............
+
+XPath is a powerful language, but you should try to keep your ACL
+xpaths simple and the builtin shortcuts should be used whenever
+possible.
+
+[[topics_Features_Resourcesets,Syntax: Resource sets]]
+=== Syntax: Resource sets
+
+Using resource sets can be a bit confusing unless one knows the
+details of the implementation in Pacemaker as well as how to interpret
+the syntax provided by `crmsh`.
+
+Three different types of resource sets are provided by `crmsh`, and
+each one implies different values for the two resource set attributes,
++sequential+ and +require-all+.
+
++sequential+::
+ If false, the resources in the set do not depend on each other
+ internally. Setting +sequential+ to +true+ implies a strict order of
+ dependency within the set.
+
++require-all+::
+ If false, only one resource in the set is required to fulfil the
+ requirements of the set. The set of A, B and C with +require-all+
+ set to +false+ is read as "A OR B OR C" when its dependencies
+ are resolved.
+
+The three types of resource sets modify the attributes in the
+following way:
+
+1. Implicit sets (no brackets). +sequential=true+, +require-all=true+
+2. Parenthesis set (+(+ ... +)+). +sequential=false+, +require-all=true+
+3. Bracket set (+[+ ... +]+). +sequential=false+, +require-all=false+
+
+To create a set with the properties +sequential=true+ and
++require-all=false+, explicitly set +sequential+ in a bracketed set,
++[ A B C sequential=true ]+.
+
+To create multiple sets with both +sequential+ and +require-all+ set to
+true, explicitly set +sequential+ in a parenthesis set:
++A B ( C D sequential=true )+.
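+
+As an illustration, the bracket and parenthesis sets can be combined
+in a single ordering constraint (resource names are hypothetical):
+
+...............
+order o-example Mandatory: [ A B ] ( C D )
+...............
+
+Here, +C+ and +D+ may start in any order once either +A+ or +B+ has
+been started.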
+
+[[topics_Features_AttributeListReferences,Syntax: Attribute list references]]
+=== Syntax: Attribute list references
+
+Attribute lists are used to set attributes and parameters for
+resources, constraints and property definitions. For example, to set
+the virtual IP used by an +IPAddr2+ resource the attribute +ip+ can be
+set in an attribute list for that resource.
+
+Attribute lists can have identifiers that name them, and other
+resources can reuse the same attribute list by referring to that name
+using an +$id-ref+. For example, the following statement defines a
+simple dummy resource with an attribute list which sets the parameter
++state+ to the value 1 and sets the identifier for the attribute list
+to +on-state+:
+
+..............
+primitive dummy-1 Dummy params $id=on-state state=1
+..............
+
+To refer to this attribute list from a different resource, refer to
+the +on-state+ name using an id-ref:
+
+..............
+primitive dummy-2 Dummy params $id-ref=on-state
+..............
+
+The resource +dummy-2+ will now also have the parameter +state+ set to the value 1.
+
+[[topics_Features_AttributeReferences,Syntax: Attribute references]]
+=== Syntax: Attribute references
+
+In some cases, referencing complete attribute lists is too
+coarse-grained, for example if two different parameters with different
+names should have the same value set. Instead of having to copy the
+value in multiple places, it is possible to create references to
+individual attributes in attribute lists.
+
+To name an attribute in order to be able to refer to it later, prefix
+the attribute name with a +$+ character (as seen above with the
+special names +$id+ and +$id-ref+:
+
+............
+primitive dummy-1 Dummy params $state=1
+............
+
+The identifier +state+ can now be used to refer to this attribute from other
+primitives, using the +@<id>+ syntax:
+
+............
+primitive dummy-2 Dummy params @state
+............
+
+In some cases, using the attribute name as the identifier doesn't work
+due to name clashes. In this case, the syntax +$<id>:<name>=<value>+
+can be used to give the attribute a different identifier:
+
+............
+primitive dummy-1 params $dummy-state-on:state=1
+primitive dummy-2 params @dummy-state-on
+............
+
+There is also the possibility that two resources both use the same
+attribute value but with different names. For example, a web server
+may have a parameter +server_ip+ for setting the IP address where it
+listens for incoming requests, and a virtual IP resource may have a
+parameter called +ip+ which sets the IP address it creates. To
+configure these two resources with an IP without repeating the value,
+the reference can be given a name using the syntax +@<id>:<name>+.
+
+Example:
+............
+primitive virtual-ip IPaddr2 params $vip:ip=192.168.1.100
+primitive webserver apache params @vip:server_ip
+............
+
+[[topics_Syntax_RuleExpressions,Syntax: Rule expressions]]
+=== Syntax: Rule expressions
+
+Many of the configuration commands in `crmsh` now support the use of
+_rule expressions_, which can influence what attributes apply to a
+resource or under which conditions a constraint is applied, depending
+on changing conditions like date, time, the value of attributes and
+more.
+
+Here is an example of a simple rule expression used to apply a
+different resource parameter on the node named `node1`:
+
+..............
+primitive my_resource Special \
+ params 2: rule #uname eq node1 interface=eth1 \
+ params 1: interface=eth0
+..............
+
+This primitive resource has two lists of parameters with descending
+priority. The parameter list with the highest priority is applied
+first, but only if the rule expressions for that parameter list all
+apply. In this case, the rule `#uname eq node1` limits the parameter
+list so that it is only applied on `node1`.
+
+Note that rule expressions are not terminated and are immediately
+followed by the data to which the rule is applied. In this case, the
+name-value pair `interface=eth1`.
+
+Rule expressions can contain multiple expressions connected using the
+boolean operators `or` and `and`. The full syntax for rule expressions
+is listed below.
+
+..............
+rules ::
+ rule [id_spec] [$role=<role>] <score>: <expression>
+ [rule [id_spec] [$role=<role>] <score>: <expression> ...]
+
+id_spec :: $id=<id> | $id-ref=<id>
+score :: <number> | <attribute> | [-]inf
+expression :: <simple_exp> [<bool_op> <simple_exp> ...]
+bool_op :: or | and
+simple_exp :: <attribute> [type:]<binary_op> <value>
+ | <unary_op> <attribute>
+ | date <date_expr>
+type :: <string> | <version> | <number>
+binary_op :: lt | gt | lte | gte | eq | ne
+unary_op :: defined | not_defined
+
+date_expr :: lt <end>
+ | gt <start>
+ | in start=<start> end=<end>
+ | in start=<start> <duration>
+ | spec <date_spec>
+duration|date_spec ::
+ hours=<value>
+ | monthdays=<value>
+ | weekdays=<value>
+ | yeardays=<value>
+ | months=<value>
+ | weeks=<value>
+ | years=<value>
+ | weekyears=<value>
+ | moon=<value>
+..............
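+
+As an illustration, a date expression can be used in a location
+constraint to apply a preference only on weekends (the resource and
+constraint names are hypothetical):
+
+...............
+location l-weekend www-server rule 100: date spec weekdays=6-7
+...............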
+
+[[topics_Lifetime,Lifetime parameter format]]
+== Lifetime parameter format
+
+Lifetimes can be specified in the ISO 8601 time format or the ISO 8601
+duration format. To distinguish between months and minutes, use the PT
+prefix before specifying minutes. The duration format is one of
++PnYnMnDTnHnMnS+, +PnW+, +P<date>T<time>+.
+
+P = duration. Y = year. M = month. W = week. D = day. T = time. H =
+hour. M = minute. S = second.
+
+Examples:
+.................
+PT5M = 5 minutes later.
+P3D = 3 days later.
+PT1H = 1 hour later.
+.................
+
+The cluster checks lifetimes at an interval defined by the
+cluster-recheck-interval property (default 15 minutes).
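+
+Lifetimes are accepted by commands such as +resource ban+. For
+example, the following would keep a resource away from a node for ten
+minutes (the resource and node names are illustrative):
+
+...............
+# crm resource ban www-server node3 PT10M
+...............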
+
+
+[[topics_Reference,Command reference]]
+== Command reference
+
+The commands are structured to be compatible with the shell command
+line. Sometimes, the underlying Pacemaker grammar uses characters that
+have special meaning in bash; these will need to be quoted. This
+includes the hash or pound sign (`#`), single and double quotes, and
+any significant whitespace.
+
+Whitespace is also significant when assigning values, meaning that
++key=value+ is different from +key = value+.
+
+Commands can be referenced using short-hand as long as the short-hand
+is unique. This can be either a prefix of the command name or a prefix
+string of characters found in the name.
+
+For example, +status+ can be abbreviated as +st+ or +su+, and
++configure+ as +conf+ or +cfg+.
+
+The syntax for the commands is given below in an informal, BNF-like
+grammar.
+
+* `<value>` denotes a string.
+* `[value]` means that the construct is optional.
+* The ellipsis (`...`) signifies that the previous construct may be
+ repeated.
+* `first|second` means either first or second.
+* The rest are literals (strings, `:`, `=`).
+
+[[cmdhelp_root_status,Cluster status]]
+=== `status`
+
+Show cluster status. The status is displayed by `crm_mon`. Supply
+additional arguments for more information or different format.
+See `crm_mon(8)` for more details.
+
+Example:
+...............
+status
+status simple
+status full
+...............
+
+Usage:
+...............
+status [<option> ...]
+
+option :: full
+ | bynode
+ | inactive
+ | ops
+ | timing
+ | failcounts
+ | verbose
+ | quiet
+ | html
+ | xml
+ | simple
+ | tickets
+ | noheaders
+ | detail
+ | brief
+...............
+
+[[cmdhelp_root_verify,Verify cluster status]]
+=== `verify`
+
+Performs basic checks for the cluster configuration and
+current status, reporting potential issues.
+
+See `crm_verify(8)` and `crm_simulate(8)` for more details.
+
+Example:
+...............
+verify
+verify scores
+...............
+
+Usage:
+...............
+verify [scores]
+...............
+
+[[cmdhelp_cluster,Cluster setup and management]]
+=== `cluster` - Cluster setup and management
+
+Whole-cluster configuration management with High Availability
+awareness.
+
+The commands at the cluster level allow configuration and
+modification of the underlying cluster infrastructure, and also
+supply tools to do whole-cluster systems management.
+
+These commands enable easy installation and maintenance of an HA
+cluster, by providing support for package installation, configuration
+of the cluster messaging layer, file system setup and more.
+
+[[cmdhelp_cluster_copy,Copy file to other cluster nodes]]
+==== `copy`
+
+Copy file to other cluster nodes.
+
+Copies the given file to all other nodes unless given a
+list of nodes to copy to as argument.
+
+Usage:
+...............
+copy <filename> [nodes ...]
+...............
+
+Example:
+...............
+copy /etc/motd
+...............
+
+[[cmdhelp_cluster_diff,Diff file across cluster]]
+==== `diff`
+
+Displays the difference, if any, between a given file
+on different nodes. If the second argument is `--checksum`,
+a checksum of the file will be calculated and displayed for
+each node.
+
+Usage:
+...............
+diff <file> [--checksum] [nodes...]
+...............
+
+Example:
+...............
+diff /etc/crm/crm.conf node2
+diff /etc/resolv.conf --checksum
+...............
+
+[[cmdhelp_cluster_disable,Disable cluster services,From Code]]
+==== `disable`
+See "crm cluster help disable" or "crm cluster disable --help"
+
+[[cmdhelp_cluster_enable,Enable cluster services,From Code]]
+==== `enable`
+See "crm cluster help enable" or "crm cluster enable --help
+
+[[cmdhelp_cluster_geo_init,Configure cluster as geo cluster,From Code]]
+==== `geo-init`
+See "crm cluster help geo_init" or "crm cluster geo_init --help"
+
+[[cmdhelp_cluster_geo_init_arbitrator,Initialize node as geo cluster arbitrator,From Code]]
+==== `geo-init-arbitrator`
+See "crm cluster help geo_init_arbitrator" or "crm cluster geo_init_arbitrator --help"
+
+[[cmdhelp_cluster_geo_join,Join cluster to existing geo cluster,From Code]]
+==== `geo-join`
+See "crm cluster help geo_join" or "crm cluster geo_join --help"
+
+[[cmdhelp_cluster_health,Cluster health check]]
+==== `health`
+
+Runs a larger set of tests and queries on all nodes in the cluster to
+verify the general system health and detect potential problems.
+
+Usage:
+...............
+health
+...............
+
+[[cmdhelp_cluster_init,Initializes a new HA cluster,From Code]]
+==== `init`
+See "crm cluster help init" or "crm cluster init --help"
+
+[[cmdhelp_cluster_join,Join existing cluster,From Code]]
+==== `join`
+See "crm cluster help join" or "crm cluster join --help"
+
+[[cmdhelp_cluster_remove,Remove node(s) from the cluster,From Code]]
+==== `remove`
+See "crm cluster help remove" or "crm cluster remove --help"
+
+[[cmdhelp_cluster_crash_test,Cluster crash test tool set,From Code]]
+==== `crash_test`
+See "crm cluster help crash_test" or "crm cluster crash_test --help"
+
+[[cmdhelp_cluster_restart,Restart cluster services,From Code]]
+==== `restart`
+See "crm cluster help restart" or "crm cluster restart --help"
+
+[[cmdhelp_cluster_rename,Rename the cluster]]
+==== `rename`
+
+Rename the cluster.
+
+Usage:
+...............
+rename <new_cluster_name>
+...............
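+
+Example (with a hypothetical cluster name):
+...............
+rename cluster-b
+...............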
+
+
+[[cmdhelp_cluster_run,Execute an arbitrary command on all nodes/specific node]]
+==== `run`
+
+This command takes a shell statement as argument, executes that
+statement on all nodes in the cluster or on the given nodes,
+and reports the result.
+
+Usage:
+...............
+run <command> [node ...]
+...............
+
+Example:
+...............
+run "cat /proc/uptime"
+run "ls" node1 node2
+...............
+
+[[cmdhelp_cluster_start,Start cluster services,From Code]]
+==== `start`
+See "crm cluster help start" or "crm cluster start --help"
+
+[[cmdhelp_cluster_status,Cluster status check]]
+==== `status`
+
+Reports the status for the cluster messaging layer on the local
+node.
+
+Usage:
+...............
+status
+...............
+
+[[cmdhelp_cluster_stop,Stop cluster services,From Code]]
+==== `stop`
+See "crm cluster help stop" or "crm cluster stop --help"
+
+[[cmdhelp_cluster_wait_for_startup,Wait for cluster to start]]
+==== `wait_for_startup`
+
+Mostly useful in scripts or automated workflows, this command will
+attempt to connect to the local cluster node repeatedly. The command
+will keep trying until the cluster node responds, or the `timeout`
+elapses. The timeout can be changed by supplying a value in seconds as
+an argument.
+
+Usage:
+........
+wait_for_startup [<timeout>]
+........
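+
+Example (waiting up to 120 seconds):
+........
+wait_for_startup 120
+........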
+
+[[cmdhelp_script,Cluster script management]]
+=== `script` - Cluster script management
+
+A big part of the configuration and management of a cluster is
+collecting information about all cluster nodes and deploying changes
+to those nodes. Often, simply performing the same procedure on all
+nodes runs into problems, due to subtle differences in their
+configuration.
+
+For example, when configuring a cluster for the first time, the
+software needs to be installed and configured on all nodes before the
+cluster software can be launched and configured using `crmsh`. This
+process is cumbersome and error-prone, and the goal is for scripts to
+make this process easier.
+
+Scripts are implemented using the python `parallax` package which
+provides a thin wrapper on top of SSH. This allows the scripts to
+function through the usual SSH channels used for system maintenance,
+requiring no additional software to be installed or maintained.
+
+[[cmdhelp_script_json,JSON API for cluster scripts]]
+==== `json`
+
+This command provides a JSON API for the cluster scripts, intended for
+use in user interface tools that want to interact with the cluster via
+scripts.
+
+The command takes a single argument, which should be a JSON array with
+the first member identifying the command to perform.
+
+The output is line-based: commands that return multiple results
+return them line by line, ending with the terminator value "end".
+
+When providing parameter values to this command, they should be
+provided as nested objects, so +virtual-ip:ip=192.168.0.5+ on the
+command line becomes the JSON object
++{"virtual-ip":{"ip":"192.168.0.5"}}+.
+
+API:
+........
+["list"]
+=> [{name, shortdesc, category}]
+
+["show", <name>]
+=> [{name, shortdesc, longdesc, category, <<steps>>}]
+
+<<steps>> := [{name, shortdesc, longdesc, required, parameters, steps}]
+
+<<params>> := [{name, shortdesc, longdesc, required, unique, advanced,
+ type, value, example}]
+
+["verify", <name>, <<values>>]
+=> [{shortdesc, longdesc, text, nodes}]
+
+["run", <name>, <<values>>]
+=> [{shortdesc, rc, output|error}]
+........
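+
+Example (an illustrative sketch; +virtual-ip+ is the script used in
+the `show` example elsewhere in this section):
+........
+json '["list"]'
+json '["show", "virtual-ip"]'
+........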
+
+
+[[cmdhelp_script_list,List available scripts]]
+==== `list`
+
+Lists the available scripts, sorted by category. Scripts that have the
+special `Script` category are hidden by default, since they are mainly
+used by other scripts or commands. To also show these, pass `all` as
+argument.
+
+To get a flat list of script names, not sorted by category, pass
+`names` as an extra argument.
+
+Usage:
+............
+list [all] [names]
+............
+
+Example:
+............
+list
+list all names
+............
+
+[[cmdhelp_script_run,Run the script]]
+==== `run`
+
+Given a list of parameter values, this command will execute the
+actions specified by the cluster script. The format for the parameter
+values is the same as for the `verify` command.
+
+Optionally accepts two special parameters:
+* `nodes=<nodes>`: List of nodes that the script runs over
+* `dry_run=yes|no`: If set, the script will not perform any modifications.
+
+Additional parameters may be available depending on the script.
+
+Use the `show` command to see what parameters are available.
+
+Usage:
+.............
+run <script> [args...]
+.............
+
+Example:
+.............
+run apache install=true
+run sbd id=sbd-1 node=node1 sbd_device=/dev/disk/by-uuid/F00D-CAFE
+.............
+
+[[cmdhelp_script_show,Describe the script]]
+==== `show`
+
+Prints a description and short summary of the script, with
+descriptions of the accepted parameters.
+
+Advanced parameters are hidden by default. To show the complete list
+of parameters accepted by the script, pass `all` as argument.
+
+Usage:
+............
+show <script> [all]
+............
+
+Example:
+............
+show virtual-ip
+............
+
+[[cmdhelp_script_verify,Verify the script]]
+==== `verify`
+
+Checks the given parameter values and returns the list
+of actions that the script would execute when run with
+the same parameter values.
+
+Usage:
+............
+verify <script> [args...]
+............
+
+Example:
+............
+verify sbd id=sbd-1 node=node1 sbd_device=/dev/disk/by-uuid/F00D-CAFE
+............
+
+[[cmdhelp_corosync,Corosync management]]
+=== `corosync` - Corosync management
+
+Corosync is the underlying messaging layer for most HA clusters.
+This level provides commands for editing and managing the corosync
+configuration.
+
+[[cmdhelp_corosync_add-node,Add a corosync node]]
+==== `add-node`
+
+Adds a node to the corosync configuration. This is used with the `udpu`
+type configuration in corosync.
+
+A nodeid for the added node is generated automatically.
+
+Note that this command assumes that only a single ring is used, and
+sets only the address for ring0.
+
+Usage:
+.........
+add-node <addr> [name]
+.........
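+
+Example (with a hypothetical address and node name):
+.........
+add-node 10.0.0.3 node-3
+.........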
+
+[[cmdhelp_corosync_del-node,Remove a corosync node]]
+==== `del-node`
+
+Removes a node from the corosync configuration. The argument given is
+the `ring0_addr` address set in the configuration file.
+
+Usage:
+.........
+del-node <addr>
+.........
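+
+Example (removing the node added above):
+.........
+del-node 10.0.0.3
+.........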
+
+[[cmdhelp_corosync_diff,Diffs the corosync configuration]]
+==== `diff`
+
+Diffs the corosync configurations on different nodes. If no nodes are
+given as arguments, the corosync configurations on all nodes in the
+cluster are compared.
+
+`diff` takes an optional argument `--checksum`, to display a checksum
+for each file instead of calculating a diff.
+
+Usage:
+.........
+diff [--checksum] [node...]
+.........
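+
+Example (with hypothetical node names):
+.........
+diff node-1 node-2
+diff --checksum
+.........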
+
+[[cmdhelp_corosync_edit,Edit the corosync configuration]]
+==== `edit`
+
+Opens the Corosync configuration file in an editor.
+
+Usage:
+.........
+edit
+.........
+
+[[cmdhelp_corosync_get,Get a corosync configuration value]]
+==== `get`
+
+Returns the value configured in `corosync.conf`, which is not
+necessarily the value used in the running configuration. See `reload`
+for telling corosync about configuration changes.
+
+The argument is the complete dot-separated path to the value.
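+
+Usage:
+.........
+get <path>
+.........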
+
+If there are multiple values configured with the same path, the
+command returns all values for that path. For example, to get all
+configured `ring0_addr` values, use this command:
+
+Example:
+........
+get nodelist.node.ring0_addr
+........
+
+[[cmdhelp_corosync_log,Show the corosync log file]]
+==== `log`
+
+Opens the log file specified in the corosync configuration file. If no
+log file is configured, this command returns an error.
+
+The pager used can be configured either using the PAGER
+environment variable or in `crm.conf`.
+
+Usage:
+.........
+log
+.........
+
+[[cmdhelp_corosync_pull,Pulls the corosync configuration]]
+==== `pull`
+
+Gets the corosync configuration from another node and copies
+it to this node.
+
+Usage:
+.........
+pull <node>
+.........
+
+[[cmdhelp_corosync_push,Push the corosync configuration]]
+==== `push`
+
+Pushes the corosync configuration file on this node to
+the list of nodes provided. If no target nodes are given,
+the configuration is pushed to all other nodes in the cluster.
+
+It is recommended to use `csync2` to distribute the cluster
+configuration files rather than relying on this command.
+
+Usage:
+.........
+push [node] ...
+.........
+
+Example:
+.........
+push node-2 node-3
+.........
+
+[[cmdhelp_corosync_reload,Reload the corosync configuration]]
+==== `reload`
+
+Tells all instances of corosync in this cluster to reload
+`corosync.conf`.
+
+After pushing a new configuration to all cluster nodes, call this
+command to make corosync use the new configuration.
+
+Usage:
+.........
+reload
+.........
+
+[[cmdhelp_corosync_set,Set a corosync configuration value]]
+==== `set`
+
+Sets the value identified by the given path. If the value does not
+exist in the configuration file, it will be added. However, if the
+section containing the value does not exist, the command will fail.
+
+Usage:
+.........
+set <path> <value>
+.........
+
+Example:
+.........
+set quorum.expected_votes 2
+.........
+
+[[cmdhelp_corosync_show,Display the corosync configuration]]
+==== `show`
+
+Displays the corosync configuration on the current node.
+
+Usage:
+.........
+show
+.........
+
+[[cmdhelp_corosync_status,Display the corosync status]]
+==== `status`
+
+Displays the corosync ring status by default. It can also display
+the quorum, qdevice or qnetd status.
+
+Usage:
+.........
+status [ring|quorum|qdevice|qnetd]
+.........
+
+[[cmdhelp_cib,CIB shadow management]]
+=== `cib` - CIB shadow management
+
+This level is for management of shadow CIBs. It is available both
+at the top level and the `configure` level.
+
+All the commands are implemented using `cib_shadow(8)` and the
+`CIB_shadow` environment variable. The user prompt always
+includes the name of the currently active shadow or the live CIB.
+
+[[cmdhelp_cib_cibstatus,CIB status management and editing]]
+==== `cibstatus`
+
+Enter the CIB status section level, where the status section can be
+edited and managed. See the
+<<cmdhelp_cibstatus,CIB status management section>>.
+
+[[cmdhelp_cib_commit,copy a shadow CIB to the cluster]]
+==== `commit`
+
+Apply a shadow CIB to the cluster. If the shadow name is omitted
+then the current shadow CIB is applied.
+
+Temporary shadow CIBs are removed automatically on commit.
+
+Usage:
+...............
+commit [<cib>]
+...............
+
+[[cmdhelp_cib_delete,delete a shadow CIB]]
+==== `delete`
+
+Delete an existing shadow CIB.
+
+Usage:
+...............
+delete <cib>
+...............
+
+[[cmdhelp_cib_diff,diff between the shadow CIB and the live CIB]]
+==== `diff`
+
+Print differences between the current cluster configuration and
+the active shadow CIB.
+
+Usage:
+...............
+diff
+...............
+
+[[cmdhelp_cib_import,import a CIB or PE input file to a shadow]]
+==== `import`
+
+At times it may be useful to create a shadow file from the
+existing CIB. The CIB may be specified as a file or as a PE input
+file number. The shell will look up files in the local directory
+first and then in the PE directory (typically `/var/lib/pengine`).
+Once the CIB file is found, it is copied to a shadow and this
+shadow is immediately available for use at both `configure` and
+`cibstatus` levels.
+
+If the shadow name is omitted then the target shadow is named
+after the input CIB file.
+
+Note that there is often more than one PE input file, so you may
+need to specify the full name.
+
+Usage:
+...............
+import {<file>|<number>} [<shadow>]
+...............
+Examples:
+...............
+import pe-warn-2222
+import 2289 issue2
+...............
+
+[[cmdhelp_cib_list,list all shadow CIBs]]
+==== `list`
+
+List existing shadow CIBs.
+
+Usage:
+...............
+list
+...............
+
+[[cmdhelp_cib_new,create a new shadow CIB]]
+==== `new`
+
+Create a new shadow CIB. The live cluster configuration and
+status is copied to the shadow CIB.
+
+If the name of the shadow is omitted, a temporary shadow CIB is
+created. This is useful when multiple level sessions are desired
+without affecting the cluster. A temporary shadow CIB is short-lived
+and is removed either on `commit` or on program exit. Note that if
+the temporary shadow is not committed, all changes in it are lost.
+
+Specify `withstatus` if you want to edit the status section of
+the shadow CIB (see the <<cmdhelp_cibstatus,cibstatus section>>).
+Add `force` to force overwriting the existing shadow CIB.
+
+To start with an empty configuration that is not copied from the live
+CIB, specify the `empty` keyword. (This also allows a shadow CIB to be
+created in case no cluster is running.)
+
+Usage:
+...............
+new [<cib>] [withstatus] [force] [empty]
+...............
+
+[[cmdhelp_cib_reset,copy live cib to a shadow CIB]]
+==== `reset`
+
+Copy the current cluster configuration into the shadow CIB.
+
+Usage:
+...............
+reset <cib>
+...............
+
+[[cmdhelp_cib_use,change working CIB]]
+==== `use`
+
+Choose a CIB source. If you want to edit the status section of the
+shadow CIB, specify `withstatus` (see <<cmdhelp_cibstatus,`cibstatus`>>).
+Leave out the CIB name to switch to the running CIB.
+
+Usage:
+...............
+use [<cib>] [withstatus]
+...............
+
+[[cmdhelp_ra,Resource Agents (RA) lists and documentation]]
+=== `ra` - Resource Agents (RA) lists and documentation
+
+This level contains commands which show various information about
+the installed resource agents. It is available both at the top
+level and at the `configure` level.
+
+[[cmdhelp_ra_classes,list classes and providers]]
+==== `classes`
+
+Print all resource agents' classes and, where appropriate, a list
+of available providers.
+
+Usage:
+...............
+classes
+...............
+
+[[cmdhelp_ra_info,show meta data for a RA]]
+==== `info` (`meta`)
+
+Show the meta-data of a resource agent type. This is where users
+can find information on how to use a resource agent. It is also
+possible to get information from some programs: `pengine`,
+`crmd`, `cib`, and `stonithd`. Just specify the program name
+instead of an RA.
+
+Usage:
+...............
+info [<class>:[<provider>:]]<type>
+info <type> <class> [<provider>] (obsolete)
+...............
+Example:
+...............
+info apache
+info ocf:pacemaker:Dummy
+info stonith:ipmilan
+info pengine
+...............
+
+[[cmdhelp_ra_list,list RA for a class (and provider)]]
+==== `list`
+
+List available resource agents for the given class. If the class
+is `ocf`, supply a provider to get agents which are available
+only from that provider.
+
+Usage:
+...............
+list <class> [<provider>]
+...............
+Example:
+...............
+list ocf pacemaker
+...............
+
+[[cmdhelp_ra_providers,show providers for a RA and a class]]
+==== `providers`
+
+List providers for a resource agent type. The class parameter
+defaults to `ocf`.
+
+Usage:
+...............
+providers <type> [<class>]
+...............
+Example:
+...............
+providers apache
+...............
+
+[[cmdhelp_ra_validate,validate parameters for RA]]
+==== `validate`
+
+If the resource agent supports the `validate-all` action, this calls
+the action with the given parameters, printing any warnings or errors
+reported by the agent.
+
+Usage:
+................
+validate <agent> [<key>=<value> ...]
+................
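+Example (parameters depend on the agent):
+................
+validate ocf:heartbeat:IPaddr2 ip=10.0.0.5
+................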
+
+[[cmdhelp_resource,Resource management]]
+=== `resource` - Resource management
+
+At this level resources may be managed.
+
+All (or almost all) commands are implemented with the CRM tools
+such as `crm_resource(8)`.
+
+[[cmdhelp_resource_ban,ban a resource from a node]]
+==== `ban`
+
+Ban a resource from running on a certain node. If no node is given
+as argument, the resource is banned from the current location.
+
+See `move` for details on other arguments.
+
+Usage:
+...............
+ban <rsc> [<node>] [<lifetime>] [force]
+...............
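+Example (with a hypothetical resource and node):
+...............
+ban dummy node3
+...............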
+
+[[cmdhelp_resource_cleanup,cleanup resource status]]
+==== `cleanup`
+
+If the resource has any past failures, clear its history and fail
+count. This is typically done after the resource has temporarily
+failed.
+
+If a node is omitted, cleanup is performed on all nodes.
+
++(Pacemaker 1.1.14)+ Pass +force+ to clean up the resource itself;
+otherwise, the cleanup command applies to the parent resource (if
+any).
+
+Usage:
+...............
+cleanup [<rsc>] [<node>] [force]
+...............
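+Example (with a hypothetical resource and node):
+...............
+cleanup apache
+cleanup apache node2 force
+...............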
+
+[[cmdhelp_resource_clear,Clear any relocation constraint]]
+==== `clear` (`unmove`, `unmigrate`, `unban`)
+
+Remove any relocation constraint created by
+the `move`, `migrate` or `ban` command.
+
+Usage:
+...............
+clear <rsc>
+unmigrate <rsc>
+unban <rsc>
+...............
+
+[[cmdhelp_resource_constraints,Show constraints affecting a resource]]
+==== `constraints`
+
+Display the location and colocation constraints affecting the
+resource.
+
+Usage:
+................
+constraints <rsc>
+................
+
+[[cmdhelp_resource_demote,demote a promotable resource]]
+==== `demote`
+
+Demote a promotable resource using the `target-role`
+attribute.
+
+Usage:
+...............
+demote <rsc>
+...............
+
+[[cmdhelp_resource_failcount,manage failcounts]]
+==== `failcount`
+
+Show/edit/delete the failcount of a resource.
+When setting a non-zero value, `operation` and `interval` should be
+provided if multiple operation failcount entries exist.
+`interval` is a value in seconds.
+
+Usage:
+...............
+failcount <rsc> set <node> <value> [operation] [interval]
+failcount <rsc> delete <node>
+failcount <rsc> show <node>
+...............
+Example:
+...............
+failcount fs_0 delete node2
+...............
+
+[[cmdhelp_resource_locate,show the location of resources]]
+==== `locate`
+
+Show the current location of one or more resources.
+
+Usage:
+...............
+locate [<rsc> ...]
+...............
+
+[[cmdhelp_resource_maintenance,Enable/disable per-resource maintenance mode]]
+==== `maintenance`
+
+Enables or disables the per-resource maintenance mode. When this mode
+is enabled, no monitor operations will be triggered for the resource.
+The `maintenance` attribute conflicts with the `is-managed` attribute.
+When setting the `maintenance` attribute, the user is prompted to
+remove the `is-managed` attribute if it exists.
+
+Usage:
+..................
+maintenance <resource> [on|off|true|false]
+..................
+
+Example:
+..................
+maintenance rsc1
+maintenance rsc2 off
+..................
+
+[[cmdhelp_resource_manage,put a resource into managed mode]]
+==== `manage`
+
+Manage a resource using the `is-managed` attribute. If there
+are multiple meta attribute sets, the attribute is set in all of
+them. If the resource is a clone, all `is-managed` attributes are
+removed from the child resources.
+The `is-managed` attribute conflicts with the `maintenance` attribute.
+When setting the `is-managed` attribute, the user is prompted to
+remove the `maintenance` attribute if it exists.
+
+For details on group management see <<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+manage <rsc>
+...............
+
+[[cmdhelp_resource_meta,manage a meta attribute]]
+==== `meta`
+
+Show/edit/delete a meta attribute of a resource. Currently, all
+meta attributes of a resource may be managed with other commands
+such as `resource stop`.
+
+Usage:
+...............
+meta <rsc> set <attr> <value>
+meta <rsc> delete <attr>
+meta <rsc> show <attr>
+...............
+Example:
+...............
+meta ip_0 set target-role stopped
+...............
+
+[[cmdhelp_resource_move,Move a resource to another node]]
+==== `move` (`migrate`)
+
+Move a resource away from its current location.
+
+If the destination node is left out, the resource is migrated by
+creating a constraint which prevents it from running on the current
+node. For this type of constraint to be created, the +force+ argument
+is required.
+
+A lifetime may be given for the constraint. Once it expires, the
+location constraint will no longer be active.
+
+Usage:
+...............
+move <rsc> [<node>] [<lifetime>] [force]
+...............
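+Example (+rsc1+ and +node2+ are hypothetical; +P1D+ is an ISO 8601
+lifetime of one day):
+...............
+move rsc1 node2
+move rsc1 node2 P1D
+...............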
+
+[[cmdhelp_resource_operations,Show active resource operations]]
+==== `operations`
+
+Show active operations, optionally filtered by resource and node.
+
+Usage:
+................
+operations [<rsc>] [<node>]
+................
+
+[[cmdhelp_resource_param,manage a parameter of a resource]]
+==== `param`
+
+Show/edit/delete a parameter of a resource.
+
+Usage:
+...............
+param <rsc> set <param> <value>
+param <rsc> delete <param>
+param <rsc> show <param>
+...............
+Example:
+...............
+param ip_0 show ip
+...............
+
+[[cmdhelp_resource_promote,promote a promotable resource]]
+==== `promote`
+
+Promote a promotable resource using the `target-role`
+attribute.
+
+Usage:
+...............
+promote <rsc>
+...............
+
+[[cmdhelp_resource_refresh,Recheck current resource status and drop failure history]]
+==== `refresh`
+
+Delete the resource's history (including failures) so that its current state is rechecked.
+
+Usage:
+...............
+refresh [<rsc>] [<node>] [force]
+...............
+
+[[cmdhelp_resource_restart,restart resources]]
+==== `restart`
+
+Restart one or more resources. This is essentially a shortcut for
+a resource stop followed by a start. The shell first waits for the
+stop to finish, that is, for all resources to actually stop, and
+only then orders the start action. Since this command entails a
+whole set of operations, informational messages are printed to show
+progress.
+
+For details on group management see
+<<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+restart <rsc> [<rsc> ...]
+...............
+Example:
+...............
+# crm resource restart g_webserver
+INFO: ordering g_webserver to stop
+waiting for stop to finish .... done
+INFO: ordering g_webserver to start
+#
+...............
+
+[[cmdhelp_resource_scores,Display resource scores]]
+==== `scores`
+
+Display the allocation scores for all resources.
+
+Usage:
+................
+scores
+................
+
+[[cmdhelp_resource_secret,manage sensitive parameters]]
+==== `secret`
+
+Sensitive parameters can be kept in local files rather than CIB
+in order to prevent accidental data exposure. Use the `secret`
+command to manage such parameters. `stash` and `unstash` move the
+value from the CIB and back to the CIB respectively. The `set`
+subcommand sets the parameter to the provided value. `delete`
+removes the parameter completely. `show` displays the value of
+the parameter from the local file. Use `check` to verify if the
+local file content is valid.
+
+Usage:
+...............
+secret <rsc> set <param> <value>
+secret <rsc> stash <param>
+secret <rsc> unstash <param>
+secret <rsc> delete <param>
+secret <rsc> show <param>
+secret <rsc> check <param>
+...............
+Example:
+...............
+secret fence_1 show password
+secret fence_1 stash password
+secret fence_1 set password secret_value
+...............
+
+[[cmdhelp_resource_start,start resources]]
+==== `start`
+
+Start one or more resources by setting the `target-role` attribute. If
+there are multiple meta attribute sets, the attribute is set in all
+of them. If the resource is a clone, all `target-role` attributes are
+removed from the child resources.
+
+For details on group management see
+<<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+start <rsc> [<rsc> ...]
+...............
+
+[[cmdhelp_resource_status,show status of resources]]
+==== `status` (`show`, `list`)
+
+Print resource status. More than one resource can be shown at once. If
+the resource parameter is left out, the status of all resources is
+printed.
+
+Usage:
+...............
+status [<rsc> ...]
+...............
+
+[[cmdhelp_resource_stop,stop resources]]
+==== `stop`
+
+Stop one or more resources using the `target-role` attribute. If there
+are multiple meta attribute sets, the attribute is set in all of
+them. If the resource is a clone, all `target-role` attributes are
+removed from the child resources.
+
+For details on group management see
+<<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+stop <rsc> [<rsc> ...]
+...............
+
+[[cmdhelp_resource_trace,start RA tracing]]
+==== `trace`
+
+Start tracing RA for the given operation. When `[<log-dir>]`
+is not specified, the trace files are stored in `$HA_VARLIB/trace_ra`.
+If the operation to be traced is monitor, note that the number
+of trace files can grow very quickly.
+
+If no operation name is given, crmsh will attempt to trace all
+operations for the RA. This includes any configured operations, start
+and stop as well as promote/demote for multistate resources.
+
+To trace the probe operation which exists for all resources, either
+set a trace for `monitor` with interval `0`, or use `probe` as the
+operation name.
+
+Note: RA tracing is only supported by OCF resource agents.
+The pacemaker-execd daemon does not log recurring monitor operations
+unless an error occurs.
+
+Usage:
+...............
+trace <rsc> [<op> [<interval>] [<log-dir>]]
+...............
+Example:
+...............
+trace fs start
+trace webserver
+trace webserver probe
+trace fs monitor 0 /var/log/foo/bar
+...............
+
+[[cmdhelp_resource_unmanage,put a resource into unmanaged mode]]
+==== `unmanage`
+
+Unmanage a resource using the `is-managed` attribute. If there
+are multiple meta attribute sets, the attribute is set in all of
+them. If the resource is a clone, all `is-managed` attributes are
+removed from the child resources.
+
+For details on group management see <<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+unmanage <rsc>
+...............
+
+[[cmdhelp_resource_untrace,stop RA tracing]]
+==== `untrace`
+
+Stop tracing RA for the given operation. If no operation name is
+given, crmsh will attempt to stop tracing all operations of the resource.
+
+Usage:
+...............
+untrace <rsc> [<op> [<interval>] ]
+...............
+Example:
+...............
+untrace fs start
+untrace webserver
+...............
+
+[[cmdhelp_resource_utilization,manage a utilization attribute]]
+==== `utilization`
+
+Show/edit/delete a utilization attribute of a resource. These
+attributes describe hardware requirements. By setting the
+`placement-strategy` cluster property appropriately, it is
+possible then to distribute resources based on resource
+requirements and node size. See also <<cmdhelp_node_utilization,node utilization attributes>>.
+
+Usage:
+...............
+utilization <rsc> set <attr> <value>
+utilization <rsc> delete <attr>
+utilization <rsc> show <attr>
+...............
+Example:
+...............
+utilization xen1 set memory 4096
+...............
+
+[[cmdhelp_node,Node management]]
+=== `node` - Node management
+
+Node management and status commands.
+
+[[cmdhelp_node_attribute,manage attributes]]
+==== `attribute`
+
+Edit node attributes. This kind of attribute should refer to
+relatively static properties, such as memory size.
+
+Usage:
+...............
+attribute <node> set <attr> <value>
+attribute <node> delete <attr>
+attribute <node> show <attr>
+...............
+Example:
+...............
+attribute node_1 set memory_size 4096
+...............
+
+[[cmdhelp_node_clearstate,Clear node state]]
+==== `clearstate`
+
+Resets and clears the state of the specified node. This node is
+afterwards assumed clean and offline. This command can be used to
+manually confirm that a node has been fenced (e.g., powered off).
+
+Be careful! This can cause data corruption if you confirm that a node
+is down when it is, in fact, not cleanly down: the cluster will proceed
+as if the fence had succeeded, possibly starting resources multiple
+times.
+
+Usage:
+...............
+clearstate <node>
+...............
+
+[[cmdhelp_node_delete,delete node (deprecated)]]
+==== `delete`
+
+Remove a node from cluster.
+
+If the node is still listed as active and a member of our
+partition, we refuse to remove it. With the global force option
+(`-F`), we will try to delete the node anyway.
+
+Usage:
+...............
+delete <node>
+...............
+
+.Deprecation note
+*****
+This command is deprecated in favor of `crm cluster remove [-F] -c <node>`,
+which will adjust the related cluster configuration and clean up any
+leftovers (e.g., stopping the cluster services) on the removed node.
+*****
+
+[[cmdhelp_node_fence,fence node]]
+==== `fence`
+
+Make the CRM fence a node. This functionality depends on stonith
+resources capable of fencing the specified node. If no such stonith
+resources exist, no fencing will happen.
+
+Usage:
+...............
+fence <node>
+...............
+
+[[cmdhelp_node_maintenance,put node into maintenance mode]]
+==== `maintenance`
+
+Set the node status to maintenance. This is equivalent to the
+cluster-wide `maintenance-mode` property but puts just one node
+into maintenance mode. If there are maintained resources on
+the node, the user will be prompted to remove the maintenance
+property from them.
+
+The node parameter defaults to the node where the command is run.
+
+Usage:
+...............
+maintenance [<node>]
+...............
+
+[[cmdhelp_node_online,set node online,From Code]]
+==== `online`
+See "crm node help online" or "crm node online --help"
+
+[[cmdhelp_node_ready,put node into ready mode]]
+==== `ready`
+
+Set the node's maintenance status to `off`. The node should now
+again be fully operational and capable of running resource
+operations.
+
+The node parameter defaults to the node where the command is run.
+
+Usage:
+...............
+ready [<node>]
+...............
+
+[[cmdhelp_node_server,show node hostname or server address]]
+==== `server`
+
+Remote nodes may have a configured server address which should
+be used when contacting the node. This command prints the
+server address if configured, else the node name.
+
+If no parameter is given, the addresses or names for all nodes
+are printed.
+
+Usage:
+...............
+server [<node> ...]
+...............
+
+[[cmdhelp_node_show,show node]]
+==== `show`
+
+Show a node definition. If the node parameter is omitted, then all
+nodes are shown.
+
+Usage:
+...............
+show [<node>]
+...............
+
+[[cmdhelp_node_standby,put node into standby,From Code]]
+==== `standby`
+See "crm node help standby" or "crm node standby --help"
+
+[[cmdhelp_node_status-attr,manage status attributes]]
+==== `status-attr`
+
+Edit node attributes which are in the CIB status section, i.e.,
+attributes which hold properties of a more volatile nature. One
+typical example is the attribute generated by the `pingd` utility.
+
+Usage:
+...............
+status-attr <node> set <attr> <value>
+status-attr <node> delete <attr>
+status-attr <node> show <attr>
+...............
+Example:
+...............
+status-attr node_1 show pingd
+...............
+
+[[cmdhelp_node_utilization,manage utilization attributes]]
+==== `utilization`
+
+Edit node utilization attributes. These attributes describe
+hardware characteristics as integer numbers such as memory size
+or the number of CPUs. By setting the `placement-strategy`
+cluster property appropriately, it is possible then to distribute
+resources based on resource requirements and node size. See also
+<<cmdhelp_resource_utilization,resource utilization attributes>>.
+
+Usage:
+...............
+utilization <node> set <attr> <value>
+utilization <node> delete <attr>
+utilization <node> show <attr>
+...............
+Examples:
+...............
+utilization node_1 set memory 16384
+utilization node_1 show cpu
+...............
+
+[[cmdhelp_site,GEO clustering site support]]
+=== `site` - GEO clustering site support
+
+A cluster may consist of two or more subclusters in different and
+distant locations. This set of commands supports such setups.
+
+[[cmdhelp_site_ticket,manage site tickets]]
+==== `ticket`
+
+Tickets are cluster-wide attributes. They can be managed at the
+site where this command is executed.
+
+It is then possible to constrain resources depending on the
+ticket availability (see the <<cmdhelp_configure_rsc_ticket,`rsc_ticket`>> command
+for more details).
+
+Usage:
+...............
+ticket {grant|revoke|standby|activate|show|time|delete} <ticket>
+...............
+Example:
+...............
+ticket grant ticket1
+...............
+
+[[cmdhelp_options,User preferences]]
+=== `options` - User preferences
+
+The user may set various options for the crm shell itself.
+
+[[cmdhelp_options_add-quotes,add quotes around parameters containing spaces]]
+==== `add-quotes`
+
+The shell (as in `/bin/sh`) parser strips quotes from the command
+line. This may sometimes make it really difficult to type values
+which contain white space. One typical example is the configure
+filter command. The crm shell will supply extra quotes around
+arguments which contain white space. The default is `yes`.
+
+.Note on quotes use
+****************************
+Automatic quoting of arguments was introduced in version 1.2.2 and
+is technically a regression; that is the only reason the
+`add-quotes` option exists. If you have custom shell scripts which
+would break, just set the `add-quotes` option to `no`.
+
+For instance, with adding quotes enabled, it is possible to do
+the following:
+...............
+# crm configure primitive d1 Dummy \
+ meta description="some description here"
+# crm configure filter 'sed "s/hostlist=./&node-c /"' fencing
+...............
+****************************
+
+[[cmdhelp_options_check-frequency,when to perform semantic check]]
+==== `check-frequency`
+
+Semantic checks of the CIB, or of elements modified or created, may
+be done on every configuration change (`always`), only when
+verifying (`on-verify`), or `never`. The default is `always`.
+Experts may want to change the setting to `on-verify`.
+
+The checks require that resource agents are present. If they are
+not installed at configuration time, set this preference to
+`never`.
+
+See <<topics_Features_Checks,Configuration semantic checks>> for more details.
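+
+Usage:
+...............
+check-frequency {always|on-verify|never}
+...............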
+
+[[cmdhelp_options_check-mode,how to treat semantic errors]]
+==== `check-mode`
+
+Semantic checks of the CIB, or of elements modified or created, may
+be done in `strict` mode or in `relaxed` mode. In the former,
+certain problems are treated as configuration errors; in the
+`relaxed` mode, all are treated as warnings. The default is `strict`.
+
+See <<topics_Features_Checks,Configuration semantic checks>> for more details.
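+
+Usage:
+...............
+check-mode {strict|relaxed}
+...............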
+
+[[cmdhelp_options_colorscheme,set colors for output]]
+==== `colorscheme`
+
+With `output` set to `color`, a comma-separated list of colors
+from this option is used to emphasize:
+
+- keywords
+- object ids
+- attribute names
+- attribute values
+- scores
+- resource references
+
+`crm` can show colors only if there is curses support for python
+installed (usually provided by the `python-curses` package). The
+colors are whatever is available in your terminal. Use `normal`
+if you want to keep the default foreground color.
+
+This user preference defaults to
+`yellow,normal,cyan,red,green,magenta` which is good for
+terminals with dark background. You may want to change the color
+scheme and save it in the preferences file for other color
+setups.
+
+Example:
+...............
+colorscheme yellow,normal,blue,red,green,magenta
+...............
+
+[[cmdhelp_options_editor,set preferred editor program]]
+==== `editor`
+
+The `edit` command invokes an editor. Use this to specify your
+preferred editor program. If not set, it will default to either
+the value of the `EDITOR` environment variable or to one of the
+standard UNIX editors (`vi`,`emacs`,`nano`).
+
+Usage:
+...............
+editor program
+...............
+Example:
+...............
+editor vim
+...............
+
+[[cmdhelp_options_manage-children,how to handle children resource attributes]]
+==== `manage-children`
+
+Some resource management commands, such as `resource stop`, may
+not always produce the desired result when the target resource is
+a group. Each element, the group and its primitive members, can
+have a meta-attribute, and those attributes may end up with
+conflicting values. Consider the following construct:
+...............
+crm(live)# configure show svc fs virtual-ip
+primitive fs Filesystem \
+ params device="/dev/drbd0" directory="/srv/nfs" fstype=ext3 \
+ op monitor interval=10s \
+ meta target-role=Started
+primitive virtual-ip IPaddr2 \
+ params ip=10.2.13.110 iflabel=1 \
+ op monitor interval=10s \
+ op start interval=0 \
+ meta target-role=Started
+group svc fs virtual-ip \
+ meta target-role=Stopped
+...............
+
+Even though the element +svc+ should be stopped, the group is
+actually running because all its members have the +target-role+
+set to +Started+:
+...............
+crm(live)# resource show svc
+resource svc is running on: xen-f
+...............
+
+Hence, if the user invokes +resource stop svc+ the intention is
+not clear. This preference gives the user an opportunity to
+better control what happens if attributes of group members have
+values which are in conflict with the same attribute of the group
+itself.
+
+Possible values are +ask+ (the default), +always+, and +never+.
+If set to +always+, the crm shell removes all child attributes
+which have values different from the parent. If set to +never+,
+all child attributes are left intact. Finally, if set to
++ask+, the user will be asked for each member what is to be done.
+
+[[cmdhelp_options_output,set output type]]
+==== `output`
+
+`crm` can adorn configurations in two ways: in color (similar to
+for instance the `ls --color` command) and by showing keywords in
+upper case. Possible values are `plain`, `color-always`, `color`,
+and `uppercase`. It is possible to combine `uppercase` with one
+of the color values in order to get an upper case Xmas tree. Just
+set this option to `color,uppercase` or `color-always,uppercase`.
+In case you need color codes in pipes, `color-always` forces color
+codes even in case the terminal is not a tty (just like `ls
+--color=always`).
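+
+Example:
+...............
+output color,uppercase
+...............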
+
+[[cmdhelp_options_pager,set preferred pager program]]
+==== `pager`
+
+The `view` command displays text through a pager. Use this to
+specify your preferred pager program. If not set, it will default
+to either the value of the `PAGER` environment variable or to one
+of the standard UNIX system pagers (`less`,`more`,`pg`).
+
+[[cmdhelp_options_reset,reset user preferences to factory defaults]]
+==== `reset`
+
+This command resets all user options to the defaults. If used as
+a single-shot command, the rc file (+$HOME/.config/crm/rc+) is
+reset to the defaults too.
+
+[[cmdhelp_options_save,save the user preferences to the rc file]]
+==== `save`
+
+Save current settings to the rc file (+$HOME/.config/crm/rc+). On
+further `crm` runs, the rc file is automatically read and parsed.
+
+[[cmdhelp_options_set,Set the value of a given option]]
+==== `set`
+
+Sets the value of an option. Takes the fully qualified
+name of the option as argument, as displayed by +show all+.
+
+The modified option value is stored in the user-local
+configuration file, usually found in +~/.config/crm/crm.conf+.
+
+Usage:
+........
+set <option> <value>
+........
+
+Example:
+........
+set color.warn "magenta bold"
+set editor nano
+........
+
+[[cmdhelp_options_show,show current user preference]]
+==== `show`
+
+Display all current settings.
+
+Given an option name as argument, `show` will display only the value
+of that argument.
+
+Given +all+ as argument, `show` displays all available user options.
+
+Usage:
+........
+show [all|<option>]
+........
+
+Example:
+........
+show
+show skill-level
+show all
+........
+
+[[cmdhelp_options_skill-level,set skill level]]
+==== `skill-level`
+
+Based on the skill-level setting, the user is allowed to use only
+a subset of commands. There are three levels: operator,
+administrator, and expert. The operator level allows only
+commands at the `resource` and `node` levels, but not editing
+or deleting resources. The administrator may do that and may also
+configure the cluster at the `configure` level and manage the
+shadow CIBs. The expert may do all.
+
+Usage:
+...............
+skill-level <level>
+
+level :: operator | administrator | expert
+...............
+
+.Note on security
+****************************
+The `skill-level` option is advisory only. There is nothing
+stopping any user from changing their skill level (see
+<<topics_Features_Security,Access Control Lists (ACL)>> on how to enforce
+access control).
+****************************
+
+[[cmdhelp_options_sort-elements,sort CIB elements]]
+==== `sort-elements`
+
+`crm` by default sorts CIB elements. If you want them to appear in
+the order they were created, set this option to `no`.
+
+Usage:
+...............
+sort-elements {yes|no}
+...............
+Example:
+...............
+sort-elements no
+...............
+
+[[cmdhelp_options_user,set the cluster user]]
+==== `user`
+
+Sufficient privileges are necessary in order to manage a
+cluster: programs such as `crm_verify` or `crm_resource` and,
+ultimately, `cibadmin` have to be run either as `root` or as the
+CRM owner user (typically `hacluster`). You don't have to worry
+about that if you run `crm` as `root`. A more secure way is to
+run the program with your usual privileges, set this option to
+the appropriate user (such as `hacluster`), and set up the
+`sudoers` file.
+
+Usage:
+...............
+user system-user
+...............
+Example:
+...............
+user hacluster
+...............
+
+[[cmdhelp_options_wait,synchronous operation]]
+==== `wait`
+
+In normal operation, `crm` runs a command and returns
+immediately to process other commands or get input from the user.
+With this option set to `yes` it will wait for the started
+transition to finish. In interactive mode dots are printed to
+indicate progress.
+
+Usage:
+...............
+wait {yes|no}
+...............
+Example:
+...............
+wait yes
+...............
+
+[[cmdhelp_configure,CIB configuration]]
+=== `configure` - CIB configuration
+
+This level enables all CIB object definition commands.
+
+The configuration may be logically divided into four parts:
+nodes, resources, constraints, and (cluster) properties and
+attributes. Each of these commands supports one or more basic CIB
+objects.
+
+Nodes and attributes describing nodes are managed using the
+`node` command.
+
+Commands for resources are:
+
+- `primitive`
+- `monitor`
+- `group`
+- `clone` (promotable clones)
+- `ms`/`master` (master-slave) (deprecated)
+
+In order to streamline large configurations, it is possible to
+define a template which can later be referenced in primitives:
+
+- `rsc_template`
+
+In that case the primitive inherits all attributes defined in the
+template.
+
+There are three types of constraints:
+
+- `location`
+- `colocation`
+- `order`
+
+It is possible to define fencing order (stonith resource
+priorities):
+
+- `fencing_topology`
+
+Finally, there are the cluster properties, resource meta-attribute
+defaults, and operation defaults. All are just sets of attributes.
+These attributes are managed by the following commands:
+
+- `property`
+- `rsc_defaults`
+- `op_defaults`
+
+In addition to the cluster configuration, the Access Control
+Lists (ACL) can be set up to allow access to parts of the CIB for
+users other than +root+ and +hacluster+. The following commands
+manage ACL:
+
+- `user`
+- `role`
+
+In Pacemaker 1.1.12 and up, this command replaces the `user` command
+for handling ACLs:
+
+- `acl_target`
+
+The changes are applied to the current CIB only on ending the
+configuration session or using the `commit` command.
+
+Comments are lines starting with +#+. Comments are tied
+to the element which follows; if the element moves, its comments
+will follow.
+
+[[cmdhelp_configure_acl_target,Define target access rights]]
+==== `acl_target`
+
+Defines an ACL target.
+
+Usage:
+................
+acl_target <tid> [<role> ...]
+................
+Example:
+................
+acl_target joe resource_admin constraint_editor
+................
+
+[[cmdhelp_configure_alert,Event-driven alerts]]
+==== `alert`
+
+.Version note
+****************************
+This feature is only available
+in Pacemaker 1.1.15+.
+****************************
+
+Event-driven alerts enable calling scripts whenever interesting
+events occur in the cluster (nodes joining or leaving, resources
+starting or stopping, etc.).
+
+The +path+ is an arbitrary file path to an alert script. Existing
+external scripts used with ClusterMon resources can be used as alert
+scripts, since the interface is compatible.
+
+Each alert may have a number of recipients configured. These will be
+passed to the script as arguments. The first recipient will also be
+passed as the +CRM_alert_recipient+ environment variable, for
+compatibility with existing scripts that only support one recipient.
+
+The available meta-attributes are +timeout+ (default 30s) and
++timestamp-format+ (default `"%H:%M:%S.%06N"`).
+
+Some configurations may require each recipient to be delimited by
+brackets, to avoid ambiguity. In the example +alert-2+ below, the meta
+attribute for `timeout` is defined after the recipient, so the
+brackets are used to ensure that the meta attribute is set for the
+alert and not just the recipient. This can be avoided by setting any
+alert attributes before defining the recipients.
+
+Usage:
+...............
+alert <id> <path> \
+ [attributes <nvpair> ...] \
+ [meta <nvpair> ...] \
+ [select [nodes | fencing | resources | attributes '{' <attribute> ... '}' ] ...] \
+ [to [{] <recipient>
+ [attributes <nvpair> ...] \
+ [meta <nvpair> ...] [}] \
+ ...]
+...............
+
+Example:
+...............
+alert alert-1 /srv/pacemaker/pcmk_alert_sample.sh \
+ to /var/log/cluster-alerts.log
+
+alert alert-2 /srv/pacemaker/example_alert.sh \
+ meta timeout=60s \
+ to { /var/log/cluster-alerts.log }
+
+alert alert-3 /srv/pacemaker/example_alert.sh \
+ select fencing \
+ to { /var/log/fencing-alerts.log }
+
+...............
+
+[[cmdhelp_configure_bundle,Container bundle]]
+==== `bundle`
+
+A bundle is a single resource specifying the settings, networking
+requirements, and storage requirements for any number of containers
+generated from the same container image.
+
+Pacemaker bundles support Docker (since version 1.1.17) and rkt (since
+version 1.1.18) container technologies.
+
+A bundle must contain exactly one +docker+ or +rkt+ element.
+
+The bundle definition may contain a reference to a primitive
+resource which defines the resource running inside the
+container.
+
+Example:
+...............
+
+primitive httpd-apache ocf:heartbeat:apache
+
+bundle httpd \
+ docker image=pcmk:httpd replicas=3 \
+ network ip-range-start=10.10.10.123 host-netmask=24 \
+ port-mapping port=80 \
+ storage \
+ storage-mapping target-dir=/var/www/html source-dir=/srv/www options=rw \
+ primitive httpd-apache
+
+...............
+
+[[cmdhelp_configure_cib,CIB shadow management]]
+==== `cib`
+
+This level is for management of shadow CIBs. It is available at
+the `configure` level to enable saving intermediate changes to a
+shadow CIB instead of to the live cluster. This short excerpt
+shows how:
+...............
+crm(live)configure# cib new test-2
+INFO: test-2 shadow CIB created
+crm(test-2)configure# commit
+...............
+Note how the current CIB in the prompt changed from +live+ to
++test-2+ after issuing the `cib new` command. See also the
+<<cmdhelp_cib,CIB shadow management>> for more information.
+
+[[cmdhelp_configure_cibstatus,CIB status management and editing]]
+==== `cibstatus`
+
+Enter the CIB status section level, where the status section can be
+edited and managed. See the
+<<cmdhelp_cibstatus,CIB status management section>>.
+
+[[cmdhelp_configure_clone,define a clone]]
+==== `clone`
+
+The `clone` command creates a resource clone. It may contain a
+single primitive resource or one group of resources.
+
++Promotable clones+ are clone resources with the +promotable=true+
+meta-attribute. They replace the deprecated master-slave resources.
+
+Usage:
+...............
+clone <name> <rsc>
+ [description=<description>]
+ [meta <attr_list>]
+ [params <attr_list>]
+
+attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+...............
+Example:
+...............
+clone cl_fence apc_1 \
+ meta clone-node-max=1 globally-unique=false
+
+clone disk1 drbd1 \
+ meta promotable=true notify=true globally-unique=false
+...............
+
+[[cmdhelp_configure_colocation,colocate resources]]
+==== `colocation` (`collocation`)
+
+This constraint expresses the placement relation between two
+or more resources. If there are more than two resources, then the
+constraint is called a resource set.
+
+The score is used to indicate the priority of the constraint. A
+positive score indicates that the resources should run on the same
+node, a negative score that they should not run on the same
+node. Values of positive or negative +infinity+ indicate a mandatory
+constraint.
+
+In the two resource form, the cluster will place +<with-rsc>+ first,
+and then decide where to put the +<rsc>+ resource.
+
+Collocation resource sets have an extra attribute (+sequential+)
+to allow for sets of resources which don't depend on each other
+in terms of state. The shell syntax for such sets is to put
+resources in parentheses.
+
+Sets cannot be nested.
+
+The optional +node-attribute+ can be used to colocate resources on a
+set of nodes and not necessarily on the same node. For example, by
+setting a node attribute +color+ on all nodes and setting the
++node-attribute+ value to +color+ as well, the colocated resources
+will be placed on any node that has the same color.
+
+For more details on how to configure resource sets, see
+<<topics_Features_Resourcesets,`Syntax: Resource sets`>>.
+
+Usage:
+...............
+colocation <id> <score>: <rsc>[:<role>] <with-rsc>[:<role>]
+ [node-attribute=<node_attr>]
+
+colocation <id> <score>: <resource_sets>
+ [node-attribute=<node_attr>]
+
+resource_sets :: <resource_set> [<resource_set> ...]
+
+resource_set :: ["("|"["] <rsc>[:<role>] [<rsc>[:<role>] ...] \
+ [<attributes>] [")"|"]"]
+
+attributes :: [require-all=(true|false)] [sequential=(true|false)]
+
+...............
+Example:
+...............
+colocation never_put_apache_with_dummy -inf: apache dummy
+colocation c1 inf: A ( B C )
+...............
+
+[[cmdhelp_configure_commit,commit the changes to the CIB]]
+==== `commit`
+
+Commit the current configuration to the CIB in use. As noted
+elsewhere, commands in a configure session don't have immediate
+effect on the CIB. All changes are applied at one point in time,
+either using `commit` or when the user leaves the configure
+level. In case the CIB in use changed in the meantime, presumably
+by somebody else, the crm shell will refuse to apply the changes.
+
+If you know that it's fine to still apply them, add +force+ to the
+command line.
+
+To disable CIB patching and apply the changes by replacing the CIB
+completely, add +replace+ to the command line. Note that this can lead
+to previous changes being overwritten if some other process
+concurrently modifies the CIB.
+
+Usage:
+...............
+commit [force] [replace]
+...............
+
+[[cmdhelp_configure_default-timeouts,set timeouts for operations to minimums from the meta-data]]
+==== `default-timeouts`
+
+This command takes the timeouts from the actions section of the
+resource agent meta-data and sets them for the operations of the
+primitive.
+
+Usage:
+...............
+default-timeouts <id> [<id>...]
+...............
+
+.Note on `default-timeouts`
+****************************
+The use of this command is discouraged in favor of manually
+determining the best timeouts required for the particular
+configuration. Relying on the resource agent to supply appropriate
+timeouts can cause the resource to fail at the worst possible moment.
+
+Appropriate timeouts for resource actions are context-sensitive, and
+should be carefully considered with the whole configuration in mind.
+****************************
+
+[[cmdhelp_configure_delete,delete CIB objects]]
+==== `delete`
+
+Delete one or more objects. If an object to be deleted belongs to
+a container object, such as a group, and it is the only resource
+in that container, then the container is deleted as well. Any
+related constraints are also removed.
+
+If the object is a started resource, it will not be deleted unless the
++--force+ flag is passed to the command, or the +force+ option is set.
+
+Usage:
+...............
+delete [--force] <id> [<id>...]
+...............
+
+[[cmdhelp_configure_edit,edit CIB objects]]
+==== `edit`
+
+This command invokes the editor with the object description. As
+with the `show` command, the user may choose to edit all objects
+or a set of objects.
+
+If the user insists, the XML version of the object may be edited.
+If you do that, don't modify any id attributes.
+
+Usage:
+...............
+edit [xml] [<id> ...]
+edit [xml] changed
+...............
+
+.Note on renaming element ids
+****************************
+The edit command sometimes cannot properly handle modifying
+element ids, in particular for elements which belong to group or
+ms resources. Group and ms resources themselves also cannot be
+renamed. Please use the `rename` command instead.
+****************************
+
+[[cmdhelp_configure_erase,erase the CIB]]
+==== `erase`
+
+.Deprecation note
+****************************
+`crm configure erase` is deprecated.
+The replacement could be `crm cluster remove [node]`
+****************************
+
+The `erase` command clears all configuration apart from nodes. To
+remove nodes as well, specify the additional keyword `nodes`.
+
+Note that removing nodes from the live cluster may have some
+strange/interesting/unwelcome effects.
+
+Usage:
+...............
+erase [nodes]
+...............
+
+[[cmdhelp_configure_fencing_topology,node fencing order]]
+==== `fencing_topology`
+
+If multiple fencing (stonith) devices capable of fencing a node are
+available, their order may be specified by +fencing_topology+.
+The order is specified per node.
+
+Stonith resources can be separated by +,+ in which case all of
+them need to succeed. If they fail, the next stonith resource (or
+set of resources) is used. In other words, use a comma to separate
+resources which all need to succeed and whitespace for serial
+order. Whitespace around the comma is not allowed.
+
+If the node is left out, the order is used for all nodes.
+That should reduce the configuration size in some stonith setups.
+
+From Pacemaker version 1.1.14, it is possible to use a node attribute
+as the +target+ in a fencing topology. The syntax for this usage is
+described below.
+
+From Pacemaker version 1.1.14, it is also possible to use regular
+expression patterns as the +target+ in a fencing topology. The configured
+fencing sequence then applies to all devices matching the pattern.
+
+Usage:
+...............
+fencing_topology <stonith_resources> [<stonith_resources> ...]
+fencing_topology <fencing_order> [<fencing_order> ...]
+
+fencing_order :: <target> <stonith_resources> [<stonith_resources> ...]
+
+stonith_resources :: <rsc>[,<rsc>...]
+target :: <node>: | attr:<node-attribute>=<value> | pattern:<pattern>
+...............
+Example:
+...............
+# Only kill the power if poison-pill fails
+fencing_topology poison-pill power
+
+# As above for node-a, but a different strategy for node-b
+fencing_topology \
+ node-a: poison-pill power \
+ node-b: ipmi serial
+
+# Fencing anything on rack 1 requires fencing via both APC 1 and 2,
+# to defeat the redundancy provided by two separate UPS units.
+fencing_topology attr:rack=1 apc01,apc02
+
+# Fencing for all machines named green.* is done using the pear
+# fencing device first, while all machines named red.* are fenced
+# using the apple fencing device first.
+fencing_topology \
+ pattern:green.* pear apple \
+ pattern:red.* apple pear
+...............
+
+[[cmdhelp_configure_filter,filter CIB objects]]
+==== `filter`
+
+This command filters the given CIB elements through an external
+program. The program should accept input on `stdin` and send
+output to `stdout` (the standard UNIX filter conventions). As
+with the `show` command, the user may choose to filter all or
+just a subset of elements.
+
+It is possible to filter the XML representation of objects, but
+this is probably less useful than filtering the configuration
+language. The
+presentation is somewhat different from what would be displayed
+by the `show` command---each element is shown on a single line,
+i.e., there are no backslashes and no other embellishments.
+
+Don't forget to put quotes around the filter if it contains
+spaces.
+
+Usage:
+...............
+filter <prog> [xml] [<id> ...]
+filter <prog> [xml] changed
+...............
+Examples:
+...............
+filter "sed '/^primitive/s/target-role=[^ ]*//'"
+# crm configure filter "sed '/^primitive/s/target-role=[^ ]*//'"
+crm configure <<END
+ filter "sed '/threshold=\"1\"/s/=\"1\"/=\"0\"/g'"
+END
+...............
+
+.Note on quotation marks
+**************************
+Filter commands which feature a blend of quotation marks can be
+difficult to get right, especially when used directly from bash, since
+bash does its own quotation parsing. In these cases, it can be easier
+to supply the filter command as standard input. See the last example
+above.
+**************************
+
+[[cmdhelp_configure_get_property,Get property value]]
+==== `get-property`
+
+Show the value of the given property. If the value is not set, the
+command will print the default value for the property, if known.
+
+If no property name is passed to the command, the list of known
+cluster properties is printed.
+
+If the property is set multiple times, for example using multiple
+property sets with different rule expressions, the output of this
+command is undefined.
+
+Pass the argument +-t+ or +--true+ to `get-property` to translate
+the property value into +true+ or +false+. If the value is not
+set, the command will print +false+.
+
+Usage:
+...............
+get-property [-t|--true] [<name>]
+...............
+
+Example:
+...............
+get-property stonith-enabled
+get-property -t maintenance-mode
+...............
+
+[[cmdhelp_configure_graph,generate a directed graph]]
+==== `graph`
+
+Create a graphviz graphical layout from the current cluster
+configuration.
+
+Currently, only `dot` (directed graph) is supported. It is
+essentially a visualization of resource ordering.
+
+The graph may be saved to a file which can be used as source for
+various graphviz tools (by default it is displayed in the user's
+X11 session). Optionally, by specifying the format, one can also
+produce an image instead.
+
+For more or different graphviz attributes, it is possible to save
+the default set of attributes to an ini file. If this file exists
+it will always override the builtin settings. The +exportsettings+
+subcommand also prints the location of the ini file.
+
+Usage:
+...............
+graph [<gtype> [<file> [<img_format>]]]
+graph exportsettings
+
+gtype :: dot
+img_format :: `dot` output format (see the +-T+ option)
+...............
+Example:
+...............
+graph dot
+graph dot clu1.conf.dot
+graph dot clu1.conf.svg svg
+...............
+
+[[cmdhelp_configure_group,define a group]]
+==== `group`
+
+The `group` command creates a group of resources. This can be useful
+when resources depend on other resources and require that those
+resources start in order on the same node. A common use of resource
+groups is to ensure that a server and a virtual IP are located
+together, and that the virtual IP is started before the server.
+
+Grouped resources are started in the order they appear in the group,
+and stopped in the reverse order. If a resource in the group cannot
+run anywhere, resources following it in the group will not start.
+
+`group` can be passed the "container" meta attribute, to indicate that
+it is to be used to group VM resources monitored using Nagios. The
+resource referred to by the container attribute must be of type
+`ocf:heartbeat:Xen`, `ocf:heartbeat:VirtualDomain` or `ocf:heartbeat:lxc`.
+
+Usage:
+...............
+group <name> <rsc> [<rsc>...]
+ [description=<description>]
+ [meta attr_list]
+ [params attr_list]
+
+attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+...............
+Example:
+...............
+group internal_www disk0 fs0 internal_ip apache \
+ meta target_role=stopped
+
+group vm-and-services vm vm-sshd meta container="vm"
+...............
+
+[[cmdhelp_configure_load,import the CIB from a file]]
+==== `load`
+
+Load a part of configuration (or all of it) from a local file or
+a network URL. The +replace+ method replaces the current
+configuration with the one from the source. The +update+ method
+tries to import the contents into the current configuration. The
++push+ method imports the contents into the current configuration
+and removes any lines that are not present in the given
+configuration.
+The file may be a CLI file or an XML file.
+
+If the URL is `-`, the configuration is read from standard input.
+
+Usage:
+...............
+load [xml] <method> URL
+
+method :: replace | update | push
+...............
+Example:
+...............
+load xml update myfirstcib.xml
+load xml replace http://storage.big.com/cibs/bigcib.xml
+load xml push smallcib.xml
+...............
+
+[[cmdhelp_configure_location,a location preference]]
+==== `location`
+
+`location` defines the preference of nodes for the given
+resource. The location constraints consist of one or more rules
+which specify a score to be awarded if the rule matches.
+
+The resource referenced by the location constraint can be one of the
+following:
+
+* Plain resource reference: +location loc1 webserver 100: node1+
+* Resource set in curly brackets: +location loc1 { virtual-ip webserver } 100: node1+
+* Tag containing resource ids: +location loc1 tag1 100: node1+
+* Resource pattern: +location loc1 /web.*/ 100: node1+
+
+The +resource-discovery+ attribute allows probes to be selectively
+enabled or disabled per resource and node.
+
+The syntax for resource sets is described in detail for
+<<cmdhelp_configure_colocation,`colocation`>>.
+
+For more details on how to configure resource sets, see
+<<topics_Features_Resourcesets,`Syntax: Resource sets`>>.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+location <id> <rsc> [<attributes>] {<node_pref>|<rules>}
+
+rsc :: /<rsc-pattern>/
+ | { resource_sets }
+ | <rsc>
+
+attributes :: role=<role> | resource-discovery=always|never|exclusive
+
+node_pref :: <score>: <node>
+
+rules ::
+ rule [id_spec] [$role=<role>] <score>: <expression>
+ [rule [id_spec] [$role=<role>] <score>: <expression> ...]
+
+id_spec :: $id=<id> | $id-ref=<id>
+score :: <number> | <attribute> | [-]inf
+expression :: <simple_exp> [<bool_op> <simple_exp> ...]
+bool_op :: or | and
+simple_exp :: <attribute> [type:]<binary_op> <value>
+ | <unary_op> <attribute>
+ | date <date_expr>
+type :: string | version | number
+binary_op :: lt | gt | lte | gte | eq | ne
+unary_op :: defined | not_defined
+
+date_expr :: lt <end>
+ | gt <start>
+ | in start=<start> end=<end>
+ | in start=<start> <duration>
+ | spec <date_spec>
+duration|date_spec ::
+ hours=<value>
+ | monthdays=<value>
+ | weekdays=<value>
+  | yeardays=<value>
+ | months=<value>
+ | weeks=<value>
+ | years=<value>
+ | weekyears=<value>
+ | moon=<value>
+...............
+Examples:
+...............
+location conn_1 internal_www 100: node1
+
+location conn_1 internal_www \
+ rule 50: #uname eq node1 \
+ rule pingd: defined pingd
+
+location conn_2 dummy_float \
+ rule -inf: not_defined pingd or pingd number:lte 0
+
+# never probe for rsc1 on node1
+location no-probe rsc1 resource-discovery=never -inf: node1
+...............
+
+[[cmdhelp_configure_modgroup,modify group]]
+==== `modgroup`
+
+Add or remove primitives in a group. The `add` subcommand appends
+the new group member by default. Should it go elsewhere, there
+are `after` and `before` clauses.
+
+Usage:
+...............
+modgroup <id> add <id> [after <id>|before <id>]
+modgroup <id> remove <id>
+...............
+Examples:
+...............
+modgroup share1 add storage2 before share1-fs
+...............
+
+[[cmdhelp_configure_monitor,add monitor operation to a primitive]]
+==== `monitor`
+
+Monitor is by far the most common operation. It is possible to
+add it without editing the whole resource. This also helps keep
+long primitive definitions uncluttered. In order to make this
+command as concise as possible, less common operation attributes
+are not available. If you need them, then use the `op` part of
+the `primitive` command.
+
+Usage:
+...............
+monitor <rsc>[:<role>] <interval>[:<timeout>]
+...............
+Example:
+...............
+monitor apcfence 60m:60s
+...............
+
+Note that after executing the command, the monitor operation may
+be shown as part of the primitive definition.
+
+[[cmdhelp_configure_ms,define a master-slave resource (deprecated)]]
+==== `ms` (`master`)
+
+The `ms` command creates a master/slave resource type. It may contain a
+single primitive resource or one group of resources.
+
+Usage:
+...............
+ms <name> <rsc>
+ [description=<description>]
+ [meta attr_list]
+ [params attr_list]
+
+attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+...............
+Example:
+...............
+ms disk1 drbd1 \
+ meta notify=true globally-unique=false
+...............
+
+.Note on `ms` deprecated
+****************************
+From Pacemaker-2.0, the resource type referred to as "master/slave",
+"stateful", or "multi-state" is no longer a separate resource type,
+but a variation of clone now referred to as a "promotable clone".
+For backward compatibility, the above configuration is also accepted.
+...............
+clone disk1 drbd1 \
+ meta promotable=true notify=true globally-unique=false
+...............
+****************************
+
+.Note on `id-ref` usage
+****************************
+Instance or meta attributes (`params` and `meta`) may contain
+a reference to another set of attributes. In that case, no other
+attributes are allowed. Since attribute sets' ids, though they do
+exist, are not shown by `crm`, it is also possible to
+reference an object instead of an attribute set. `crm` will
+automatically replace such a reference with the right id:
+
+...............
+crm(live)configure# primitive a2 www-2 meta $id-ref=a1
+crm(live)configure# show a2
+primitive a2 apache \
+ meta $id-ref=a1-meta_attributes
+ [...]
+...............
+It is advisable to give meaningful names to attribute sets which
+are going to be referenced.
+****************************
+
+[[cmdhelp_configure_node,define a cluster node]]
+==== `node`
+
+The node command describes a cluster node. Nodes in the CIB are
+commonly created automatically by the CRM. Hence, you should not
+need to deal with nodes unless you also want to define node
+attributes. Note that it is also possible to manage node
+attributes at the `node` level.
+
+Usage:
+...............
+node [$id=<id>] <uname>[:<type>]
+ [description=<description>]
+ [attributes [$id=<id>] [<score>:] [rule...]
+ <param>=<value> [<param>=<value>...]] | $id-ref=<ref>
+ [utilization [$id=<id>] [<score>:] [rule...]
+ <param>=<value> [<param>=<value>...]] | $id-ref=<ref>
+
+type :: normal | member | ping | remote
+...............
+Example:
+...............
+node node1
+node big_node attributes memory=64
+...............
+
+[[cmdhelp_configure_op_defaults,set resource operations defaults]]
+==== `op_defaults`
+
+Set defaults for the operations meta attributes.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+op_defaults [$id=<set_id>] [rule ...] <option>=<value> [<option>=<value> ...]
+...............
+Example:
+...............
+op_defaults record-pending=true
+...............
+
+[[cmdhelp_configure_order,order resources]]
+==== `order`
+
+This constraint expresses the order of actions on two or more
+resources. If there are more than two resources, then the
+constraint is called a resource set.
+
+Ordered resource sets have an extra attribute to allow for sets
+of resources whose actions may run in parallel. The shell syntax
+for such sets is to put resources in parentheses.
+
+If the subsequent resource can start or promote after any one of
+the resources in a set has completed its action, enclose the set
+in brackets (+[+ and +]+).
+
+Sets cannot be nested.
+
+Three strings are reserved to specify a kind of order constraint:
++Mandatory+, +Optional+, and +Serialize+. It is preferred to use
+one of these settings instead of score. Previous versions mapped
+scores +0+ and +inf+ to keywords +advisory+ and +mandatory+.
+That is still valid but deprecated.
+
+For more details on how to configure resource sets, see
+<<topics_Features_Resourcesets,`Syntax: Resource sets`>>.
+
+Usage:
+...............
+order <id> [kind:] first then [symmetrical=<bool>]
+
+order <id> [kind:] resource_sets [symmetrical=<bool>]
+
+kind :: Mandatory | Optional | Serialize
+
+first :: <rsc>[:<action>]
+
+then :: <rsc>[:<action>]
+
+resource_sets :: resource_set [resource_set ...]
+
+resource_set :: ["["|"("] <rsc>[:<action>] [<rsc>[:<action>] ...] \
+ [attributes] ["]"|")"]
+
+attributes :: [require-all=(true|false)] [sequential=(true|false)]
+
+...............
+Example:
+...............
+order o-1 Mandatory: apache:start ip_1
+order o-2 Serialize: A ( B C )
+order o-4 first-resource then-resource
+...............
+
+[[cmdhelp_configure_primitive,define a resource]]
+==== `primitive`
+
+The primitive command describes a resource. It may be referenced
+only once in group or clone objects. If it's not
+referenced, then it is placed as a single resource in the CIB.
+
+Operations may be specified anonymously, as a group or by reference:
+
+* "Anonymous", as a list of +op+ specifications. Use this
+ method if you don't need to reference the set of operations
+ elsewhere. This is the most common way to define operations.
+
+* If reusing operation sets is desired, use the +operations+ keyword
+ along with an id to give the operations set a name. Use the
+ +operations+ keyword and an id-ref value set to the id of another
+ operations set, to apply the same set of operations to this
+ primitive.
+
+Operation attributes which are not recognized are saved as
+instance attributes of that operation. A typical example is
++OCF_CHECK_LEVEL+.
+
+For multistate resources, roles are specified as +role=<role>+.
+The +Master+ and +Slave+ roles are deprecated; use the +Promoted+
+and +Unpromoted+ roles of promotable clone resources instead.
+
+A template may be defined for resources which are of the same
+type and which share most of the configuration. See
+<<cmdhelp_configure_rsc_template,`rsc_template`>> for more information.
+
+Attributes containing time values, such as the +interval+ attribute on
+operations, are configured either as a plain number, which is
+interpreted as a time in seconds, or using one of the following
+suffixes:
+
+* +s+, +sec+ - time in seconds (same as no suffix)
+* +ms+, +msec+ - time in milliseconds
+* +us+, +usec+ - time in microseconds
+* +m+, +min+ - time in minutes
+* +h+, +hr+ - time in hours
+
+Usage:
+...............
+primitive <rsc> {[<class>:[<provider>:]]<type>|@<template>}
+ [description=<description>]
+ [[params] attr_list]
+ [meta attr_list]
+ [utilization attr_list]
+ [operations id_spec]
+ [op op_type [<attribute>=<value>...]
+ [[op_params] attr_list]
+ [op_meta attr_list] ...]
+
+attr_list :: [$id=<id>] [<score>:] [rule...]
+ <attr>=<val> [<attr>=<val>...]] | $id-ref=<id>
+id_spec :: $id=<id> | $id-ref=<id>
+op_type :: start | stop | monitor
+...............
+Example:
+...............
+primitive apcfence stonith:apcsmart \
+ params ttydev=/dev/ttyS0 hostlist="node1 node2" \
+ op start timeout=60s \
+ op monitor interval=30m timeout=60s
+
+primitive www8 apache \
+ configfile=/etc/apache/www8.conf \
+ operations $id-ref=apache_ops
+
+primitive db0 mysql \
+ params config=/etc/mysql/db0.conf \
+ op monitor interval=60s \
+ op monitor interval=300s OCF_CHECK_LEVEL=10
+
+primitive r0 ocf:linbit:drbd \
+ params drbd_resource=r0 \
+ op monitor role=Promoted interval=60s \
+ op monitor role=Unpromoted interval=300s
+
+primitive xen0 @vm_scheme1 xmfile=/etc/xen/vm/xen0
+
+primitive mySpecialRsc Special \
+ params 3: rule #uname eq node1 interface=eth1 \
+ params 2: rule #uname eq node2 interface=eth2 port=8888 \
+ params 1: interface=eth0 port=9999
+
+primitive A ocf:pacemaker:Dummy \
+ op start \
+ op_meta 2: rule #ra-version version:gt 1.0 timeout=120s \
+ op_meta 1: timeout=60s
+...............
+
+[[cmdhelp_configure_property,set a cluster property]]
+==== `property`
+
+Set cluster configuration properties. To list the
+available cluster configuration properties, use the
+<<cmdhelp_ra_info,`ra info`>> command with +pengine+, +crmd+,
++cib+ and +stonithd+ as arguments.
+When setting the +maintenance-mode+ property, the command
+informs the user if there are nodes or resources that
+have the +maintenance+ property set.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+property [<set_id>:] [rule ...] <option>=<value> [<option>=<value> ...]
+...............
+Example:
+...............
+property stonith-enabled=true
+property rule date spec years=2014 stonith-enabled=false
+...............
+
+[[cmdhelp_configure_ptest,show cluster actions if changes were committed]]
+==== `ptest` (`simulate`)
+
+Show PE (Policy Engine) motions using `ptest(8)` or
+`crm_simulate(8)`.
+
+A CIB is constructed using the current user edited configuration
+and the status from the running CIB. The resulting CIB is run
+through `ptest` (or `crm_simulate`) to show changes which would
+happen if the configuration is committed.
+
+The status section may be loaded from another source and modified
+using the <<cmdhelp_cibstatus,`cibstatus`>> level commands. In that case, the
+`ptest` command will issue a message informing the user that the
+Policy Engine graph is not calculated based on the current status
+section, and will therefore show what would happen not to the
+running cluster but to an imaginary one.
+
+If you have graphviz installed and an X11 session, `dotty(1)` is run
+to display the changes graphically.
+
+Add a string of +v+ characters to increase verbosity. `ptest`
+can also show allocation scores. +utilization+ turns on
+information about the remaining capacity of nodes. With the
++actions+ option, `ptest` will print all resource actions.
+
+The `ptest` program has been replaced by `crm_simulate` in newer
+Pacemaker versions. In some installations both may be
+installed. Use `simulate` to enforce using `crm_simulate`.
+
+Usage:
+...............
+ptest [nograph] [v...] [scores] [actions] [utilization]
+...............
+Examples:
+...............
+ptest scores
+ptest vvvvv
+simulate actions
+...............
+
+[[cmdhelp_configure_refresh,refresh from CIB]]
+==== `refresh`
+
+Refresh the internal structures from the CIB. All changes made
+during this session are lost.
+
+Usage:
+...............
+refresh
+...............
+
+[[cmdhelp_configure_rename,rename a CIB object]]
+==== `rename`
+
+Rename an object. It is recommended to use this command to rename
+a resource, because it will take care of updating all related
+constraints and a parent resource. Changing ids with the edit
+command won't have the same effect.
+
+If you want to rename a resource, it must be in the stopped state.
+
+Usage:
+...............
+rename <old_id> <new_id>
+...............
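+Example (assuming a stopped resource currently named +vip-old+):
+...............
+rename vip-old vip-new
+...............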
+
+[[cmdhelp_configure_role,define role access rights]]
+==== `role`
+
+An ACL role is a set of rules which describe access rights to
+CIB. Rules consist of an access right +read+, +write+, or +deny+
+and a specification denoting part of the configuration to which
+the access right applies. The specification can be an XPath or a
+combination of tag and id references. If an attribute is
+appended, then the specification applies only to that attribute
+of the matching element.
+
+There are a number of shortcuts for XPath specifications. The
++meta+, +params+, and +utilization+ shortcuts reference resource
+meta attributes, parameters, and utilization respectively. The
++location+ shortcut references location constraints for a
+resource; most of the time it is used to allow the resource
+`move` and `unmove` commands. The +property+ shortcut references
+cluster properties. The +node+ shortcut allows reading node
+attributes. +nodeattr+ and +nodeutil+ reference node attributes
+and node capacity (utilization). The +status+ shortcut
+references the whole status section of the CIB. Read access to
+status is necessary for various monitoring tools such as
+`crm_mon(8)` (aka `crm status`).
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+role <role-id> rule [rule ...]
+
+rule :: acl-right cib-spec [attribute:<attribute>]
+
+acl-right :: read | write | deny
+
+cib-spec :: xpath-spec | tag-ref-spec
+xpath-spec :: xpath:<xpath> | shortcut
+tag-ref-spec :: tag:<tag> | ref:<id> | tag:<tag> ref:<id>
+
+shortcut :: meta:<rsc>[:<attr>]
+ params:<rsc>[:<attr>]
+ utilization:<rsc>
+ location:<rsc>
+ property[:<attr>]
+ node[:<node>]
+ nodeattr[:<attr>]
+ nodeutil[:<node>]
+ status
+...............
+Example:
+...............
+role app1_admin \
+ write meta:app1:target-role \
+ write meta:app1:is-managed \
+ write location:app1 \
+ read ref:app1
+...............
+
+[[cmdhelp_configure_rsc_defaults,set resource defaults]]
+==== `rsc_defaults`
+
+Set defaults for the resource meta attributes.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+rsc_defaults [<set_id>:] [rule ...] <option>=<value> [<option>=<value> ...]
+...............
+Example:
+...............
+rsc_defaults failure-timeout=3m
+...............
+
+[[cmdhelp_configure_rsc_template,define a resource template]]
+==== `rsc_template`
+
+The `rsc_template` command creates a resource template. It may be
+referenced in primitives. It is used to reduce large
+configurations with many similar resources.
+
+Usage:
+...............
+rsc_template <name> [<class>:[<provider>:]]<type>
+ [description=<description>]
+ [params attr_list]
+ [meta attr_list]
+ [utilization attr_list]
+ [operations id_spec]
+ [op op_type [<attribute>=<value>...] ...]
+
+attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+id_spec :: $id=<id> | $id-ref=<id>
+op_type :: start | stop | monitor
+...............
+Example:
+...............
+rsc_template public_vm Xen \
+ op start timeout=300s \
+ op stop timeout=300s \
+ op monitor interval=30s timeout=60s \
+ op migrate_from timeout=600s \
+ op migrate_to timeout=600s
+primitive xen0 @public_vm \
+ params xmfile=/etc/xen/xen0
+primitive xen1 @public_vm \
+ params xmfile=/etc/xen/xen1
+...............
+
+[[cmdhelp_configure_rsc_ticket,resources ticket dependency]]
+==== `rsc_ticket`
+
+This constraint expresses dependency of resources on cluster-wide
+attributes, also known as tickets. Tickets are mainly used in
+geo-clusters, which consist of multiple sites. A ticket may be
+granted to a site, thus allowing resources to run there.
+
+The +loss-policy+ attribute specifies what happens to the
+resource (or resources) if the ticket is revoked. The default is
+either +stop+ or +demote+ depending on whether a resource is
+multi-state.
+
+See also the <<cmdhelp_site_ticket,`site`>> set of commands.
+
+Usage:
+...............
+rsc_ticket <id> <ticket_id>: <rsc>[:<role>] [<rsc>[:<role>] ...]
+ [loss-policy=<loss_policy_action>]
+
+loss_policy_action :: stop | demote | fence | freeze
+...............
+Example:
+...............
+rsc_ticket ticket-A_public-ip ticket-A: public-ip
+rsc_ticket ticket-A_bigdb ticket-A: bigdb loss-policy=fence
+rsc_ticket ticket-B_storage ticket-B: drbd-a:Promoted drbd-b:Promoted
+...............
+
+
+[[cmdhelp_configure_rsctest,test resources as currently configured]]
+==== `rsctest`
+
+Test resources with current resource configuration. If no nodes
+are specified, tests are run on all known nodes.
+
+The order of resources is significant: it is assumed that later
+resources depend on earlier ones.
+
+If a resource is multi-state, it is assumed that the role on
+which later resources depend is master.
+
+Tests are run sequentially to prevent running the same resource
+on two or more nodes. Tests are carried out only if none of the
+specified nodes currently run any of the specified resources.
+However, it won't verify whether resources run on the other
+nodes.
+
+Superuser privileges are obviously required: either run this as
+root or set up the `sudoers` file appropriately.
+
+Note that resource testing may take some time.
+
+Usage:
+...............
+rsctest <rsc_id> [<rsc_id> ...] [<node_id> ...]
+...............
+Examples:
+...............
+rsctest my_ip websvc
+rsctest websvc nodeB
+...............
+
+[[cmdhelp_configure_save,save the CIB to a file]]
+==== `save`
+
+Save the current configuration to a file. Optionally, as XML. Use
++-+ instead of file name to write the output to `stdout`.
+
+The `save` command accepts the same selection arguments as the `show`
+command. See the <<cmdhelp_configure_show,help section>> for `show`
+for more details.
+
+Usage:
+...............
+save [xml] [<id> | type:<type> | tag:<tag> |
+ related:<obj> | changed ...] <file>
+...............
+Example:
+...............
+save myfirstcib.txt
+save web-server server-config.txt
+...............
+
+[[cmdhelp_configure_schema,set or display current CIB RNG schema]]
+==== `schema`
+
+CIB's content is validated by a RNG schema. Pacemaker supports
+several, depending on version. At least the following schemas are
+accepted by `crmsh`:
+
+* +pacemaker-1.0+
+* +pacemaker-1.1+
+* +pacemaker-1.2+
+* +pacemaker-1.3+
+* +pacemaker-2.0+
+
+Use this command to display or switch to another RNG schema.
+
+Usage:
+...............
+schema [<schema>]
+...............
+Example:
+...............
+schema pacemaker-1.1
+...............
+
+[[cmdhelp_configure_set,set an attribute value]]
+==== `set`
+
+Set the value of a configured attribute. The attribute must have
+been configured previously, and can be an agent parameter, meta
+attribute, utilization value or operation value.
+
+The first argument to the command is a path to an attribute.
+This is a dot-separated sequence beginning with the name of
+the resource or object, and ending with the name of the attribute to
+set. To set an operation value, the `op_type` should be specified;
+when multiple operations of the same type exist, such as multiple
+monitors, the `interval` should be specified as well.
+
+Usage:
+...............
+set <path> <value>
+
+path:: id.[op_type.][interval.]name
+...............
+Examples:
+...............
+set vip1.ip 192.168.20.5
+set vm-a.force_stop 1
+set vip1.monitor.on-fail ignore
+set drbd.monitor.10s.interval 20s
+...............
+
+[[cmdhelp_configure_show,display CIB objects]]
+==== `show`
+
+The `show` command displays CIB objects. Without any argument, it
+displays all objects in the CIB, but the set of objects displayed by
+`show` can be limited to only objects with the given IDs or by using
+one or more of the special prefixes described below.
+
+The XML representation for the objects can be displayed by passing
++xml+ as the first argument.
+
+To show one or more specific objects, pass the object IDs as
+arguments.
+
+To show all objects of a certain type, use the +type:+ prefix.
+
+To show all objects in a tag, use the +tag:+ prefix.
+
+To show all constraints related to a primitive, or
+to show all objects of a certain RA type, use the +related:+ prefix.
+
+To show all modified objects, pass the argument +changed+.
+
+The prefixes can be used together on a single command line. For
+example, to show both the tag itself and the objects tagged by it the
+following combination can be used: +show tag:my-tag my-tag+.
+
+To refine a selection of objects using multiple modifiers, the keywords
++and+ and +or+ can be used. For example, to select all primitives tagged
++foo+, the following combination can be used:
++show type:primitive and tag:foo+.
+
+To hide values when displaying the configuration, use the
++obscure:<glob>+ argument. This can be useful when sending the
+configuration over a public channel, to avoid exposing potentially
+sensitive information. The +<glob>+ argument is a bash-style pattern
+matching attribute keys.
+
+In +/etc/crm/crm.conf+, the +obscure_pattern+ option provides a
+persistent configuration for this behaviour. For example, for
+high security concerns:
+...............
+[core]
+obscure_pattern = passw* | ip
+...............
+This makes +crm configure show+ equivalent to:
+...............
+node-1:~ # crm configure show obscure:passw* obscure:ip
+node 1084783297: node1
+primitive fence_device stonith:fence_ilo5 \
+ params password="******"
+primitive ip IPaddr2 \
+ params ip="******"
+...............
+The default pattern is +passw*+. If you don't want to obscure
+values, set the option to an empty value.
+
+Usage:
+...............
+show [xml] [<id>
+ | changed
+ | type:<type>
+ | tag:<id>
+ | related:<obj>
+ | obscure:<glob>
+ ...]
+
+type :: node | primitive | group | clone | ms | rsc_template
+ | location | colocation | order
+ | rsc_ticket
+ | property | rsc_defaults | op_defaults
+ | fencing_topology
+ | role | user | acl_target
+ | tag
+...............
+
+Example:
+...............
+show webapp
+show type:primitive
+show xml tag:db tag:fs
+show related:webapp
+show related:IPaddr2
+show related:ipad
+show related:ocf:heartbeat:Dummy
+show related:ocf:heartbeat:dum
+show related:ocf
+show related:heartbeat
+show related:pacemaker
+show related:suse
+show related:stonith
+show type:primitive obscure:passwd
+...............
+
+[[cmdhelp_configure_tag,Define resource tags]]
+==== `tag`
+
+Define a resource tag. A tag is an id referring to one or more
+resources, without implying any constraints between the tagged
+resources. This can be useful for grouping conceptually related
+resources.
+
+Usage:
+...............
+tag <tag-name>: <rsc> [<rsc> ...]
+tag <tag-name> <rsc> [<rsc> ...]
+...............
+Example:
+...............
+tag web: p-webserver p-vip
+tag ips server-vip admin-vip
+...............
+
+[[cmdhelp_configure_template,edit and import a configuration from a template]]
+==== `template`
+
+The specified template is loaded into the editor. It's up to the
+user to make a good CRM configuration out of it. See also the
+<<cmdhelp_template,template section>>.
+
+Usage:
+...............
+template [xml] url
+...............
+Example:
+...............
+template two-apaches.txt
+...............
+
+[[cmdhelp_configure_upgrade,upgrade the CIB]]
+==== `upgrade`
+
+Attempts to upgrade the CIB to validate with the current
+version. Commonly, this is required if the error
+`CIB not supported` occurs. It typically means that the
+active CIB version is coming from an older release.
+
+As a safety precaution, the force argument is required if the
++validation-with+ attribute is set to anything other than
++0.6+. Thus in most cases, it is required.
+
+Usage:
+...............
+upgrade [force]
+...............
+
+Example:
+...............
+upgrade force
+...............
+
+[[cmdhelp_configure_user,define user access rights]]
+==== `user`
+
+Users which normally cannot view or manage cluster configuration
+can be allowed access to parts of the CIB. The access is defined
+by a set of +read+, +write+, and +deny+ rules as in role
+definitions or by referencing roles. The latter is considered
+best practice.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+user <uid> {roles|rules}
+
+roles :: role:<role-ref> [role:<role-ref> ...]
+rules :: rule [rule ...]
+...............
+Example:
+...............
+user joe \
+ role:app1_admin \
+ role:read_all
+...............
+
+[[cmdhelp_configure_validate_all,call agent validate-all for resource]]
+==== `validate-all`
+
+Call the `validate-all` action for the resource, if possible.
+
+Limitations:
+
+* The resource agent must implement the `validate-all` action.
+* The current user must be root.
+* The primitive resource must not use nvpair references.
+
+Usage:
+...............
+validate-all <rsc>
+...............
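+Example (the resource id is illustrative):
+...............
+validate-all vip1
+...............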
+
+
+[[cmdhelp_configure_verify,verify the CIB with crm_verify]]
+==== `verify`
+
+Verify the contents of the CIB which would be committed.
+
+Usage:
+...............
+verify
+...............
+
+[[cmdhelp_configure_xml,raw xml]]
+==== `xml`
+
+Even though we promised no xml, it may happen, though hopefully
+very seldom, that an element from the CIB cannot be rendered
+in the configuration language. In that case, the element will be
+shown as raw xml, prefixed by this command. That element can then
+be edited like any other. If, after the change, the shell can
+digest the element, it is converted back into the normal
+configuration language. Otherwise, there is no need to use `xml`
+for configuration.
+
+Usage:
+...............
+xml <xml>
+...............
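+Example (illustrative only; the element shown stands for any CIB
+element which cannot be rendered in the configuration language):
+...............
+xml <rsc_order id="o1" first="a" then="b" kind="Mandatory"/>
+...............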
+
+[[cmdhelp_template,edit and import a configuration from a template]]
+=== `template` - Import configuration from templates
+
+The user may be assisted in the cluster configuration by
+templates prepared in advance. Templates consist of a typical
+ready configuration which may be edited to suit particular user
+needs.
+
+This command enters a template level where additional commands
+for configuration/template management are available.
+
+[[cmdhelp_template_apply,process and apply the current configuration to the current CIB]]
+==== `apply`
+
+Copy the current or given configuration to the current CIB. By
+default, the CIB is replaced, unless the method is set to
+"update".
+
+Usage:
+...............
+apply [<method>] [<config>]
+
+method :: replace | update
+...............
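+Example (assuming a configuration named +vip+ exists):
+...............
+apply update vip
+...............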
+
+[[cmdhelp_template_delete,delete a configuration]]
+==== `delete`
+
+Remove a configuration. The loaded (active) configuration may be
+removed by force.
+
+Usage:
+...............
+delete <config> [force]
+...............
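+Example (the configuration names are illustrative):
+...............
+delete bigfs
+delete vip force
+...............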
+
+[[cmdhelp_template_edit,edit a configuration]]
+==== `edit`
+
+Edit current or given configuration using your favourite editor.
+
+Usage:
+...............
+edit [<config>]
+...............
+
+[[cmdhelp_template_list,list configurations/templates]]
+==== `list`
+
+When called with no argument, lists existing templates and
+configurations.
+
+Given the argument +templates+, lists the available templates.
+
+Given the argument +configs+, lists the available configurations.
+
+Usage:
+...............
+list [templates|configs]
+...............
+
+[[cmdhelp_template_load,load a configuration]]
+==== `load`
+
+Load an existing configuration. Further `edit`, `show`, and
+`apply` commands will refer to this configuration.
+
+Usage:
+...............
+load <config>
+...............
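+Example (assuming a configuration named +vip+ exists):
+...............
+load vip
+...............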
+
+[[cmdhelp_template_new,create a new configuration from templates]]
+==== `new`
+
+Create a new configuration from one or more templates. Note that
+configurations and templates are kept in different places, so it
+is possible to have a configuration name equal to a template name.
+
+If you already know which parameters are required, you can set
+them directly on the command line.
+
+The parameter name +id+ is set by default to the name of the
+configuration.
+
+If no parameters are being set and you don't want a particular name
+for your configuration, you can call this command with a template name
+as the only parameter. A unique configuration name based on the
+template name will be generated.
+
+Usage:
+...............
+new [<config>] <template> [<template> ...] [params name=value ...]
+...............
+
+Example:
+...............
+new vip virtual-ip
+new bigfs ocfs2 params device=/dev/sdx8 directory=/bigfs
+new apache
+...............
+
+[[cmdhelp_template_show,show the processed configuration]]
+==== `show`
+
+Process the current or given configuration and display the result.
+
+Usage:
+...............
+show [<config>]
+...............
+
+[[cmdhelp_cibstatus,CIB status management and editing]]
+=== `cibstatus` - CIB status management and editing
+
+The `status` section of the CIB keeps the current status of nodes
+and resources. It is modified _only_ on events, i.e. when some
+resource operation is run or node status changes. For obvious
+reasons, the CRM has no user interface with which it is possible
+to affect the status section. From the user's point of view, the
+status section is essentially a read-only part of the CIB. The
+current status is never even written to disk, though it is
+available in the PE (Policy Engine) input files which represent
+the history of cluster motions. The current status may be read
+using the +cibadmin -Q+ command.
+
+It may sometimes be of interest to see how status changes would
+affect the Policy Engine. The set of `cibstatus` level commands
+allow the user to load status sections from various sources and
+then insert or modify resource operations or change nodes' state.
+
+The effect of those changes may then be observed by running the
+<<cmdhelp_configure_ptest,`ptest`>> command at the `configure` level
+or `simulate` and `run` commands at this level. The `ptest`
+runs with the user edited CIB whereas the latter two commands
+run with the CIB which was loaded along with the status section.
+
+The `simulate` and `run` commands as well as all status
+modification commands are implemented using `crm_simulate(8)`.
+
+[[cmdhelp_cibstatus_load,load the CIB status section]]
+==== `load`
+
+Load a status section from a file, a shadow CIB, or the running
+cluster. By default, the current (+live+) status section is
+modified. Note that if the +live+ status section is modified it
+is not going to be updated if the cluster status changes, because
+that would overwrite the user changes. To make `crm` drop changes
+and resume use of the running cluster status, run +load live+.
+
+All CIB shadow configurations contain the status section which is
+a snapshot of the status section taken at the time the shadow was
+created. Obviously, this status section doesn't have much to do
+with the running cluster status, unless the shadow CIB has just
+been created. Therefore, the `ptest` command by default uses the
+running cluster status section.
+
+Usage:
+...............
+load {<file>|shadow:<cib>|live}
+...............
+Example:
+...............
+load bug-12299.xml
+load shadow:test1
+...............
+
+[[cmdhelp_cibstatus_node,change node status]]
+==== `node`
+
+Change the node status. It is possible to throw a node out of
+the cluster, make it a member, or set its state to unclean.
+
++online+:: Set the +node_state+ `crmd` attribute to +online+
+and the +expected+ and +join+ attributes to +member+. The effect
+is that the node becomes a cluster member.
+
++offline+:: Set the +node_state+ `crmd` attribute to +offline+
+and the +expected+ attribute to empty. The node is then cleanly
+removed from the cluster.
+
++unclean+:: Set the +node_state+ `crmd` attribute to +offline+
+and the +expected+ attribute to +member+. In this case the node
+has unexpectedly disappeared.
+
+Usage:
+...............
+node <node> {online|offline|unclean}
+...............
+Example:
+...............
+node xen-b unclean
+...............
+
+[[cmdhelp_cibstatus_op,edit outcome of a resource operation]]
+==== `op`
+
+Edit the outcome of a resource operation. This way you can
+tell the CRM that it ran an operation and that the resource agent
+returned a certain exit code. It is also possible to change the
+operation's status. In case the operation status is set to
+something other than +done+, the exit code is effectively
+ignored.
+
+Usage:
+...............
+op <operation> <resource> <exit_code> [<op_status>] [<node>]
+
+operation :: probe | monitor[:<n>] | start | stop |
+ promote | demote | notify | migrate_to | migrate_from
+exit_code :: <rc> | success | generic | args |
+ unimplemented | perm | installed | configured | not_running |
+ master | failed_master
+op_status :: pending | done | cancelled | timeout | notsupported | error
+
+n :: the monitor interval in seconds; if omitted, the first
+ recurring operation is referenced
+rc :: numeric exit code in range 0..9
+...............
+Example:
+...............
+op start d1 xen-b generic
+op start d1 xen-b 1
+op monitor d1 xen-b not_running
+op stop d1 xen-b 0 timeout
+...............
+
+[[cmdhelp_cibstatus_origin,display origin of the CIB status section]]
+==== `origin`
+
+Show the origin of the status section currently in use. This
+essentially shows the latest `load` argument.
+
+Usage:
+...............
+origin
+...............
+
+[[cmdhelp_cibstatus_quorum,set the quorum]]
+==== `quorum`
+
+Set the quorum value.
+
+Usage:
+...............
+quorum <bool>
+...............
+Example:
+...............
+quorum false
+...............
+
+[[cmdhelp_cibstatus_run,run policy engine]]
+==== `run`
+
+Run the policy engine with the edited status section.
+
+Add a string of +v+ characters to increase verbosity. Specify
++scores+ to see allocation scores also. +utilization+ turns on
+information about the remaining capacity of nodes.
+
+If you have graphviz installed and an X11 session, `dotty(1)` is run
+to display the changes graphically.
+
+Usage:
+...............
+run [nograph] [v...] [scores] [utilization]
+...............
+Example:
+...............
+run
+...............
+
+[[cmdhelp_cibstatus_save,save the CIB status section]]
+==== `save`
+
+The current internal status section with whatever modifications
+were performed can be saved to a file or shadow CIB.
+
+If the file exists and contains a complete CIB, only the status
+section is going to be replaced and the rest of the CIB will
+remain intact. Otherwise, the current user edited configuration
+is saved along with the status section.
+
+Note that all modifications are saved in the source file as soon
+as they are run.
+
+Usage:
+...............
+save [<file>|shadow:<cib>]
+...............
+Example:
+...............
+save bug-12299.xml
+...............
+
+[[cmdhelp_cibstatus_show,show CIB status section]]
+==== `show`
+
+Show the current status section in the XML format. Brace yourself
+for some unreadable output. Add the +changed+ option to get a
+human-readable output of all changes.
+
+Usage:
+...............
+show [changed]
+...............
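+Example:
+...............
+show changed
+...............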
+
+[[cmdhelp_cibstatus_simulate,simulate cluster transition]]
+==== `simulate`
+
+Run the policy engine with the edited status section and simulate
+the transition.
+
+Add a string of +v+ characters to increase verbosity. Specify
++scores+ to see allocation scores also. +utilization+ turns on
+information about the remaining capacity of nodes.
+
+If you have graphviz installed and an X11 session, `dotty(1)` is run
+to display the changes graphically.
+
+Usage:
+...............
+simulate [nograph] [v...] [scores] [utilization]
+...............
+Example:
+...............
+simulate
+...............
+
+[[cmdhelp_cibstatus_ticket,manage tickets]]
+==== `ticket`
+
+Modify the ticket status. Tickets can be granted and revoked.
+Granted tickets can be activated or put in standby.
+
+Usage:
+...............
+ticket <ticket> {grant|revoke|activate|standby}
+...............
+Example:
+...............
+ticket ticketA grant
+...............
+
+[[cmdhelp_assist,Configuration assistant]]
+=== `assist` - Configuration assistant
+
+The `assist` sublevel is a collection of helper
+commands that create or modify resources and
+constraints, to simplify the creation of certain
+configurations.
+
+For more information on individual commands, see
+the help text for those commands.
+
+[[cmdhelp_assist_template,Create template for primitives]]
+==== `template`
+
+This command takes a list of primitives as argument, and creates a new
+`rsc_template` for these primitives. It can only do this if the
+primitives do not already share a template and are of the same type.
+
+Usage:
+........
+template primitive-1 primitive-2 primitive-3
+........
+
+[[cmdhelp_assist_weak-bond,Create a weak bond between resources]]
+==== `weak-bond`
+
+A colocation between a group of resources says that the resources
+should be located together, but it also means that those resources are
+dependent on each other. If one of the resources fails, the others
+will be restarted.
+
+If this is not desired, it is possible to circumvent: By placing the
+resources in a non-sequential set and colocating the set with a dummy
+resource which is not monitored, the resources will be placed together
+but will have no further dependency on each other.
+
+This command creates both the constraint and the dummy resource needed
+for such a colocation.
+
+Usage:
+........
+weak-bond resource-1 resource-2
+........
+
+[[cmdhelp_maintenance,Maintenance mode commands]]
+=== `maintenance` - Maintenance mode commands
+
+Maintenance mode commands are commands that manipulate resources
+directly without going through the cluster infrastructure. Therefore,
+it is essential to ensure that the cluster does not attempt to monitor
+or manipulate the resources while these commands are being executed.
+
+To ensure this, these commands require that maintenance mode is set
+either for the particular resource, or for the whole cluster.
+
+[[cmdhelp_maintenance_action,Invoke a resource action]]
+==== `action`
+
+Invokes the given action for the resource. This is
+done directly via the resource agent, so the command must
+be issued while the cluster or the resource is in
+maintenance mode.
+
+Unless the action is `start` or `monitor`, the action must be invoked
+on the same node as where the resource is running. If the resource is
+running on multiple nodes, the command will fail.
+
+To use SSH for executing resource actions on multiple nodes, append
+`ssh` after the action name. This requires SSH access to be configured
+between the nodes and the parallax python package to be installed.
+
+Usage:
+...............
+action <rsc> <action>
+action <rsc> <action> ssh
+...............
+Example:
+...............
+action webserver reload
+action webserver monitor ssh
+...............
+
+[[cmdhelp_maintenance_off,Disable maintenance mode]]
+==== `off`
+
+Disables maintenance mode, either for the whole cluster
+or for the given resource.
+
+Usage:
+...............
+off
+off <rsc>
+...............
+Example:
+...............
+off rsc1
+...............
+
+[[cmdhelp_maintenance_on,Enable maintenance mode]]
+==== `on`
+
+Enables maintenance mode, either for the whole cluster
+or for the given resource.
+
+Usage:
+...............
+on
+on <rsc>
+...............
+Example:
+...............
+on rsc1
+...............
+
+[[cmdhelp_history,Cluster history]]
+=== `history` - Cluster history
+
+Examining Pacemaker's history is a particularly involved task. The
+number of subsystems to be considered, the complexity of the
+configuration, and the set of various information sources, most of
+which are not exactly human readable, keep analyzing resource or node
+problems accessible to only the most knowledgeable. Or, depending on
+the point of view, to the most persistent. The following set of
+commands has been devised in the hope of making cluster history
+more accessible.
+
+Of course, looking at _all_ history could be time consuming regardless
+of how good the tools at hand are. Therefore, one should first say
+which period he or she wants to analyze. If not otherwise specified,
+the last hour is considered. Logs and other relevant information are
+collected using `crm report`. Since this process takes some time and
+we always need fresh logs, information is refreshed in a much faster
+way using the python parallax module. If +python-parallax+ is not
+found on the system, examining a live cluster is still possible --
+though not as comfortable.
+
+Apart from examining a live cluster, events may be retrieved from a
+report generated by `crm report` (see also the +-H+ option). In that
+case we assume that the period spanning the whole report needs to be
+investigated. Of course, it is still possible to further reduce the
+time range.
+
+If you have discovered an issue that you want to show someone else,
+you can use the `session pack` command to save the current session as
+a tarball, similar to those generated by `crm report`.
+
+In order to minimize the size of the tarball, and to make it easier
+for others to find the interesting events, it is recommended to limit
+the time frame which the saved session covers. This can be done using
+the `timeframe` command (example below).
+
+It is also possible to name the saved session using the `session save`
+command.
+
+Example:
+...............
+crm(live)history# limit "Jul 18 12:00" "Jul 18 12:30"
+crm(live)history# session save strange_restart
+crm(live)history# session pack
+Report saved in .../strange_restart.tar.bz2
+crm(live)history#
+...............
+
+[[cmdhelp_history_detail,set the level of detail shown]]
+==== `detail`
+
+How much detail to show from the logs. Valid detail levels are either
+`0` or `1`, where `1` is the highest detail level. The default detail
+level is `0`.
+
+Usage:
+...............
+detail <detail_level>
+
+detail_level :: small integer (defaults to 0)
+...............
+Example:
+...............
+detail 1
+...............
+
+[[cmdhelp_history_diff,cluster states/transitions difference]]
+==== `diff`
+
+A transition represents a change in cluster configuration or
+state. Use `diff` to see what has changed between two
+transitions.
+
+If you want to specify the current cluster configuration and
+status, use the string +live+.
+
+Normally, the first transition specified should be the older
+one, but this is not enforced.
+
+Note that a single configuration update may result in more than
+one transition.
+
+Usage:
+...............
+diff <pe> <pe> [status] [html]
+
+pe :: <number>|<index>|<file>|live
+...............
+Examples:
+...............
+diff 2066 2067
+diff pe-input-2080.bz2 live status
+...............
+
+[[cmdhelp_history_events,Show events in log]]
+==== `events`
+
+By analysing the log output and looking for particular
+patterns, the `events` command helps sift through
+the logs to find when particular events, like resources
+changing state or node failures, may have occurred.
+
+This can be used to generate a combined list of events
+from all nodes.
+
+Usage:
+...............
+events
+...............
+
+Example:
+...............
+events
+...............
+
+[[cmdhelp_history_exclude,exclude log messages]]
+==== `exclude`
+
+If a log is infested with irrelevant messages, those messages may
+be excluded by specifying a regular expression. The regular
+expressions use the Python extended syntax. This command is additive.
+To drop all regular expressions, use +exclude clear+. Run
+`exclude` only to see the current list of regular expressions.
+Excludes are saved along with the history sessions.
+
+Usage:
+...............
+exclude [<regex>|clear]
+...............
+Example:
+...............
+exclude kernel.*ocfs2
+...............
+
+[[cmdhelp_history_graph,generate a directed graph from the PE file]]
+==== `graph`
+
+Create a graphviz graphical layout from the PE file (the
+transition). Every transition contains the cluster configuration
+which was active at the time. See also <<cmdhelp_configure_graph,generate a directed graph
+from configuration>>.
+
+Usage:
+...............
+graph <pe> [<gtype> [<file> [<img_format>]]]
+
+gtype :: dot
+img_format :: `dot` output format (see the +-T+ option)
+...............
+Example:
+...............
+graph -1
+graph 322 dot clu1.conf.dot
+graph 322 dot clu1.conf.svg svg
+...............
+
+[[cmdhelp_history_info,Cluster information summary]]
+==== `info`
+
+The `info` command provides a summary of the information source, which
+can be either a live cluster snapshot or a previously generated
+report.
+
+Usage:
+...............
+info
+...............
+Example:
+...............
+info
+...............
+
+[[cmdhelp_history_latest,show latest news from the cluster]]
+==== `latest`
+
+The `latest` command shows a bit of recent history, more
+precisely whatever happened since the last cluster change (the
+latest transition). If the transition is running, the shell will
+first wait until it finishes.
+
+Usage:
+...............
+latest
+...............
+Example:
+...............
+latest
+...............
+
+[[cmdhelp_history_limit,limit timeframe to be examined]]
+==== `limit` (`timeframe`)
+
+This command can be used to modify the time span to examine. All
+history commands look at events within a certain time span.
+
+For the `live` source, the default time span is the _last hour_.
+
+There is no time span limit for the `crm report` source.
+
+The time period is parsed by the `dateutil` python module. It
+covers a wide range of date formats. For instance:
+
+- 3:00 (today at 3am)
+- 15:00 (today at 3pm)
+- 2010/9/1 2pm (September 1st 2010 at 2pm)
+
+For more examples of valid time/date statements, please refer to the
+`python-dateutil` documentation:
+
+- https://dateutil.readthedocs.org/[dateutil.readthedocs.org]
+
+If the dateutil module is not available, then the time is parsed using
+strptime and only the kind as printed by `date(1)` is allowed:
+
+- Tue Sep 15 20:46:27 CEST 2010
+
+Usage:
+...............
+limit [<from_time>] [<to_time>]
+...............
+Examples:
+...............
+limit 10:15
+limit 15h22m 16h
+limit "Sun 5 20:46" "Sun 5 22:00"
+...............
+
+[[cmdhelp_history_log,log content]]
+==== `log`
+
+Show messages logged on one or more nodes. Leaving out a node
+name produces combined logs of all nodes. Messages are sorted by
+time and, if the terminal emulation supports it, displayed in
+different colours depending on the node to allow for easier
+reading.
+
+The sorting key is the timestamp as written by syslog which
+normally has the maximum resolution of one second. Obviously,
+messages generated by events which share the same timestamp may
+not be sorted in the same way as they happened. Such close events
+may actually happen fairly often.
+
+Usage:
+...............
+log [<node> [<node> ...] ]
+...............
+Example:
+...............
+log node-a
+...............
+
+[[cmdhelp_history_node,node events]]
+==== `node`
+
+Show important events that happened on a node. Important events
+are node lost and join, standby and online, and fence. Use either
+node names or extended regular expressions.
+
+Usage:
+...............
+node <node> [<node> ...]
+...............
+Example:
+...............
+node node1
+...............
+
+[[cmdhelp_history_peinputs,list or get PE input files]]
+==== `peinputs`
+
+Every event in the cluster results in generating one or more
+Policy Engine (PE) files. These files describe future motions of
+resources. The files are listed as full paths in the current
+report directory. Add +v+ to also see the creation time stamps.
+
+Usage:
+...............
+peinputs [{<range>|<number>} ...] [v]
+
+range :: <n1>:<n2>
+...............
+Example:
+...............
+peinputs
+peinputs 440:444 446
+peinputs v
+...............
+
+[[cmdhelp_history_refresh,refresh live report]]
+==== `refresh`
+
+This command makes sense only for the +live+ source and makes
+`crm` collect the latest logs and other relevant information
+from the cluster nodes. If you want to make a completely new
+report, specify
++force+.
+
+Usage:
+...............
+refresh [force]
+...............
+
+[[cmdhelp_history_resource,resource events]]
+==== `resource`
+
+Show actions and any failures that happened on all specified
+resources on all nodes. Normally, one gives resource names as
+arguments, but it is also possible to use extended regular
+expressions. Note that neither groups nor clones or master/slave
+names are ever logged. The resource command is going to expand
+all of these appropriately, so that clone instances or resources
+which are part of a group are shown.
+
+Usage:
+...............
+resource <rsc> [<rsc> ...]
+...............
+Example:
+...............
+resource bigdb public_ip
+resource my_.*_db2
+resource ping_clone
+...............
+
+[[cmdhelp_history_session,manage history sessions]]
+==== `session`
+
+Sometimes you may want to get back to examining a particular
+history period or bug report. In order to make that easier, the
+current settings can be saved and later retrieved.
+
+If the history being examined comes from a live cluster, the
+logs, PE inputs, and other files are saved too, because they may
+disappear from the nodes. For existing reports coming from
+`crm report`, only the directory location is saved (to avoid
+wasting space).
+
+A history session may also be packed into a tarball which can
+then be sent to support.
+
+Leave out the subcommand to see the current session.
+
+Usage:
+...............
+session [{save|load|delete} <name> | pack [<name>] | update | list]
+...............
+Examples:
+...............
+session save bnc966622
+session load rsclost-2
+session list
+...............
+
+[[cmdhelp_history_setnodes,set the list of cluster nodes]]
+==== `setnodes`
+
+If the host this program runs on is not part of the cluster, the
+list of nodes must be set explicitly.
+
+Usage:
+...............
+setnodes <node> [<node> ...]
+...............
+Example:
+...............
+setnodes node_a node_b
+...............
+
+[[cmdhelp_history_show,show status or configuration of the PE input file]]
+==== `show`
+
+Every transition is saved as a PE file. Use this command to
+render that PE file either as configuration or status. The
+configuration output is the same as `crm configure show`.
+
+Usage:
+...............
+show <pe> [status]
+
+pe :: <number>|<index>|<file>|live
+...............
+Examples:
+...............
+show 2066
+show pe-input-2080.bz2 status
+...............
+
+[[cmdhelp_history_source,set source to be examined]]
+==== `source`
+
+Events to be examined can come from the current cluster or from a
+`crm report` report. This command sets the source. `source live`
+sets source to the running cluster and system logs. If no source
+is specified, the current source information is printed.
+
+If a report source is specified as a file, the file is unpacked
+in the directory where it resides. This directory is not removed
+on exit.
+
+Usage:
+...............
+source [<dir>|<file>|live]
+...............
+Examples:
+...............
+source live
+source /tmp/customer_case_22.tar.bz2
+source /tmp/customer_case_22
+source
+...............
+
+[[cmdhelp_history_transition,show transition]]
+==== `transition`
+
+This command will print the actions planned by the PE and run
+graphviz (`dotty`) to display a graphical representation of the
+transition. For the latter, an X11 session is required. This
+command invokes `ptest(8)` in the background.
+
+The +showdot+ subcommand runs graphviz (`dotty`) to display a
+graphical representation of the +.dot+ file which has been
+included in the report. Essentially, it shows the calculation
+produced by the `pengine` installed on the node where the report
+was produced. In the optimal case, this output should not differ
+from the one produced by the locally installed `pengine`.
+
+The `log` subcommand shows the full log for the duration of the
+transition.
+
+A transition can also be saved to a CIB shadow for further
+analysis or use with `cib` or `configure` commands (use the
+`save` subcommand). The shadow file name defaults to the name of
+the PE input file.
+
+If the PE input file number is not provided, it defaults to the
+last one, i.e. the last transition. The last transition can also
+be referenced with number 0. If the number is negative, then the
+corresponding transition relative to the last one is chosen.
+
+If there are warning and error PE input files, or if different
+nodes were the DC in the observed timeframe, PE input file
+numbers may collide. In that case, provide a unique part of the
+path to the file.
+
+After the `ptest` output, logs about events that happened during
+the transition are printed.
+
+The `tags` subcommand scans the logs for the transition and
+returns a list of key events during that transition. For example,
+the tag +error+ is returned if any errors were logged during the
+transition.
+
+Usage:
+...............
+transition [<number>|<index>|<file>] [nograph] [v...] [scores] [actions] [utilization]
+transition showdot [<number>|<index>|<file>]
+transition log [<number>|<index>|<file>]
+transition save [<number>|<index>|<file> [name]]
+transition tags [<number>|<index>|<file>]
+...............
+Examples:
+...............
+transition
+transition 444
+transition -1
+transition pe-error-3.bz2
+transition node-a/pengine/pe-input-2.bz2
+transition showdot 444
+transition log
+transition save 0 enigma-22
+...............
+
+[[cmdhelp_history_transitions,List transitions]]
+==== `transitions`
+
+A transition represents a change in cluster configuration or
+state. This command lists the transitions in the current timeframe.
+
+Usage:
+...............
+transitions
+...............
+Example:
+...............
+transitions
+...............
+
+
+[[cmdhelp_history_wdiff,cluster states/transitions difference]]
+==== `wdiff`
+
+A transition represents a change in cluster configuration or
+state. Use `wdiff` to see what has changed between two
+transitions as word differences on a line-by-line basis.
+
+If you want to specify the current cluster configuration and
+status, use the string +live+.
+
+Normally, the first transition specified should be the older
+one, but this is not enforced.
+
+Note that a single configuration update may result in more than
+one transition.
+
+Usage:
+...............
+wdiff <pe> <pe> [status]
+
+pe :: <number>|<index>|<file>|live
+...............
+Examples:
+...............
+wdiff 2066 2067
+wdiff pe-input-2080.bz2 live status
+...............
+
+[[cmdhelp_root_report,Create cluster status report,From Code]]
+=== `report`
+See "crm help report" or "crm report --help"
+
+=== `end` (`cd`, `up`)
+
+The `end` command exits the current level and moves the user to
+the parent level. This command is available everywhere.
+
+Usage:
+...............
+end
+...............
+
+=== `help`
+
+The `help` command prints help for the current level or for the
+specified topic (command). This command is available everywhere.
+
+Usage:
+...............
+help [<topic>]
+...............
+
+=== `quit` (`exit`, `bye`)
+
+Leave the program.
+
+FILES
+-----
+include::profiles.adoc[]
+
+BUGS
+----
+Even though all sensible configurations (and most of those that
+are not) are going to be supported by the crm shell, I suspect
+that certain XML constructs may still confuse the tool. When that
+happens, please file a bug report.
+
+The crm shell will not try to update the objects it does not
+understand. Of course, it is always possible to edit such objects
+in the XML format.
+
+AUTHORS
+-------
+Dejan Muhamedagic, <dejan@suse.de>
+Kristoffer Gronlund <kgronlund@suse.com>
+and many OTHERS
+
+SEE ALSO
+--------
+crm_resource(8), crm_attribute(8), crm_mon(8), cib_shadow(8),
+ptest(8), dotty(1), crm_simulate(8), cibadmin(8)
+
+
+COPYING
+-------
+Copyright \(C) 2008-2013 Dejan Muhamedagic.
+Copyright \(C) 2013 Kristoffer Gronlund.
+
+Free use of this software is granted under the terms of the GNU General Public License (GPL).
+
+//////////////////////
+ vim:ts=4:sw=4:expandtab:
+//////////////////////
diff --git a/doc/crmsh_crm_report.8.adoc b/doc/crmsh_crm_report.8.adoc
new file mode 100644
index 0000000..9e5e91c
--- /dev/null
+++ b/doc/crmsh_crm_report.8.adoc
@@ -0,0 +1,15 @@
+:man source: crmsh_crm_report
+:man version: 4.6.0
+:man manual: crmsh documentation
+
+crmsh_crm_report(8)
+==================
+
+NAME
+----
+crmsh_crm_report - create report for CRM based clusters (Pacemaker)
+
+
+SEE ALSO
+--------
+See "crm help report" or "crm report --help"
diff --git a/doc/development.md b/doc/development.md
new file mode 100644
index 0000000..2406b4d
--- /dev/null
+++ b/doc/development.md
@@ -0,0 +1,314 @@
+# Notes for developers and contributors
+
+This is mostly a list of notes that Dejan prepared for me when I
+started working on crmsh (me being Kristoffer). I've decided to update
+it at least enough to not be completely outdated, so the information
+here should be mostly up-to-date for crmsh 2.1.
+
+## data-manifest
+
+This file contains a list of all shared data files to install.
+
+Whenever a file that is to be installed to `/usr/share/crmsh` is added,
+for example a cluster script or crmsh template, the `data-manifest`
+file needs to be regenerated, by running `./update-data-manifest.sh`.
+
+## Website
+
+To build the website, you will need **Asciidoc** and **Pygments**,
+plus two special lexers for Pygments installed as a separate module.
+This module is included in the source tree for crmsh under
+`contrib`. To install the module and build the website, do the
+following:
+
+```
+cd contrib
+sudo python setup.py install
+cd ..
+cd doc/website-v1
+make
+```
+
+If everything worked out as it should, the website should now be
+generated in `doc/website-v1/gen`.
+
+## Test suite
+
+There are two separate test suites for crmsh:
+
+* `test/unittests` - These are unit tests that test small pieces of
+ code or functionality. To run these tests, run the `test/run` script
+ from the project root.
+
+* `test/testcases` - These are larger integration tests which require
+ a Pacemaker installation on the machine where the tests are to
+ run. Usually, we run these tests using the OBS and the `osc` command
+ line tool:
+
+ 1. Check out the crmsh python package to a directory (usually
+ `~/build-service/network:ha-clustering:Factory/crmsh`)
+
+ 2. Replace the tarball for crmsh in the OBS project with an archive
+ built from the current source tree. Replace the version number with
+ whatever version is the current one on OBS:
+
+ git archive --format=tar --prefix=crmsh-2.3.0+git.1470991992.7deaa3a/ -o <tmpdir>/crmsh-2.3.0+git.1470991992.7deaa3a.tar HEAD
+ bzip2 <tmpdir>/crmsh-2.3.0+git.1470991992.7deaa3a.tar
+ cp <tmpdir>/crmsh-2.3.0+git.1470991992.7deaa3a.tar.bz2 ~/build-service/network:ha-clustering:Factory/crmsh/crmsh-2.3.0+git.1470991992.7deaa3a.tar.bz2
+
+ 3. Build the rpm package for crmsh with the `with_regression_tests`
+ flag set to 1:
+
+ cd ~/build-service/network:ha-clustering:Factory/crmsh
+        osc build -d --no-verify --release=1 --define 'with_regression_tests 1' openSUSE_Tumbleweed x86_64 crmsh.spec
+
+To simplify this process, there is a utility called `obs` which can be
+downloaded here: https://github.com/krig/obs-scripts
+
+Using the `obs` script, the above is reduced to calling `obs test
+factory`, given an appropriate `obs.conf` file. See the README in the
+obs-scripts project for more details on using `obs`.
+
+## Modules
+
+This is the list of all modules including short descriptions.
+
+- `crm`
+
+  The program. Tries to detect incompatible python versions or a
+  missing crmsh module, and reports an understandable error
+  message in either case.
+
+- `crmsh/main.py`
+
+ This is where execution really starts. Verifies the environment
+ and detects the pacemaker version.
+
+- `crmsh/config.py`
+
+ Reads the `crm.conf` configuration file and tries to detect basic
+ information about where pacemaker is located etc. Some magic is
+ used to generate an object hierarchy based on the configuration,
+ so that the rest of the code can access configuration variables
+ directly.
+
+- `crmsh/constants.py`
+
+ Various hard-coded constants. Many of these should probably be
+ read from pacemaker metadata for better compatibility across
+ different versions.
+
+- `crmsh/ui_*.py`
+
+ The UI context (`ui_context.py`) parses the input command and
+ keeps track of which is the current level in the UI. `ui_root.py`
+ is the root of the UI hierarchy.
+
+- `crmsh/help.py`
+
+ Reads help from a text file and presents parts of it in
+ response to the help command. The text file has special
+ anchors to demarcate help topics and command help text.
+
+- `doc/crm.8.adoc`
+
+ Online help in asciidoc format. Several help topics (search
+ for +[[topic_+) and command reference (search for
+ +[[cmdhelp_+). Every user interface change needs to be
+ reflected here. _Actually, every user interface change has to
+ start here_. A source for the +crm(8)+ man page too.
+
+- `crmsh/cibconfig.py`
+
+ Configuration (CIB) manager. Implements the configure level.
+  The biggest and most complex part. There are three major
+ classes:
+
+ - +CibFactory+: operations on the CIB or parts of it.
+
+ - +CibObject+: every CIB element is implemented in a
+ subclass of +CibObject+. The configuration consists of a
+ set of +CibObject+ instances (subclassed, e.g. +CibNode+ or
+ +CibPrimitive+).
+
+  - +CibObjectSet+: enables operations on sets of CIB
+    elements. Two subclasses with CLI and XML presentations
+    of CIB elements. Most operations go through these
+    subclasses (+show+, +edit+, +save+, +filter+).
+
+- `crmsh/scripts.py`
+
+ Implements the cluster scripts. Reads multiple kinds of script
+ definition languages including the XML wizard format used by
+ Hawk.
+
+- `crmsh/handles.py`
+
+ A primitive handlebar-style templating language used in cluster
+ scripts.
+
+- `crmsh/idmgmt.py`
+
+ CIB id management. Guarantees that all ids are unique.
+ A helper for CibFactory.
+
+- `crmsh/parse.py`
+
+ Parses CLI -> XML.
+
+- `crmsh/cliformat.py`
+
+ Parses XML -> CLI.
+
+ Not as cleanly separated as the CLI parser, mostly a set of
+ functions called from `cibconfig.py`.
+
+- `crmsh/clidisplay.py`, `crmsh/term.py`
+
+ Applies colors to terminal output.
+
+- `crmsh/crm_gv.py`
+
+ Interface to GraphViz. Generates graph specs for dotty(1).
+
+- `crmsh/cibstatus.py`
+
+ CIB status section editor and manipulator (cibstatus
+ level). Interface to crm_simulate.
+
+- `crmsh/ra.py`
+
+ Resource agents interface.
+
+- `crmsh/rsctest.py`
+
+ Resource tester (configure rsctest command).
+
+- `crmsh/history.py`
+
+ Cluster history. Interface to logs and other artifacts left
+ on disk by the cluster.
+
+- `crmsh/log_patterns.py`, `log_patterns_118.py`
+
+  Pacemaker subsystems' log patterns, for versions earlier than
+  1.1.8 and for 1.1.8 and later, respectively.
+
+- `crmsh/schema.py`, `pacemaker.py`
+
+ Support for pacemaker RNG schema.
+
+- `crmsh/cache.py`
+
+  A very rudimentary cache implementation. Used to cache the
+  results of expensive operations (e.g. RA meta-data lookups).
+
+- `crmsh/crm_pssh.py`
+
+ Interface to the parallax library for remote SSH commands.
+
+- `crmsh/corosync.py`
+
+ Parse and edit the `corosync.conf` configuration file.
+
+- `crmsh/msg.py`
+
+ Messages for users. Can count lines and include line
+ numbers. Needs refinement.
+
+- `crmsh/utils.py`
+
+ A bag of useful functions. Needs more order.
+
+- `crmsh/xmlutil.py`
+
+ A bag of useful XML functions. Needs more order.
+
+## Code improvements / TODO
+
+These are some thoughts on how to improve maintainability and
+make crmsh nicer. Mostly for people looking at the code, the
+users shouldn't notice much (or any) difference.
+
+Everybody's invited to comment and make further suggestions, in
+particular experienced pythonistas.
+
+### Syntax bug with automatic constraint handling
+
+See issue on github https://github.com/ClusterLabs/crmsh/issues/140 .
+
+The problem is the sequence of modifications: crmsh tries to be too
+smart and rewrites a constraint which refers to all members of a
+group so that it refers to the group instead. When the group is then
+deleted, the constraint is deleted along with it.
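+
+A hypothetical sequence illustrating the issue (resource and
+constraint names are made up):
+
+```
+# a constraint refers to the individual resources
+primitive vip1 ocf:heartbeat:IPaddr2 params ip=10.0.0.10
+primitive vip2 ocf:heartbeat:IPaddr2 params ip=10.0.0.11
+colocation vips-together inf: vip1 vip2
+# putting both resources into a group rewrites the constraint
+# so that it refers to the group instead...
+group vip-group vip1 vip2
+# ...and deleting the group now deletes the constraint with it
+delete vip-group
+```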
+
+### Rewrite the hb_report script completely in Python
+
+Right now, the `hb_report` script is written in bash. This means it
+has some duplicated code, for example finding pacemaker binaries,
+with crmsh. It also means that it can be difficult to debug and
+maintain. It would be better if it was completely implemented in
+Python.
+
+### Python 3 compatibility
+
+The code is currently only compatible with Python 2.6+. We will need
+to port crmsh to Python 3 eventually. The best solution for this is
+probably the `six` Python library, which makes it possible to write
+code that is compatible with both Python 2 and Python 3.
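+
+As a small illustration of the approach (a sketch, not actual crmsh
+code):
+
+```
+import six
+
+def is_name(value):
+    # str on Python 3; str and unicode on Python 2
+    return isinstance(value, six.string_types)
+```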
+
+### Validate more using pacemaker schema
+
+- We have the pacemaker CIB schema available (see schema.py);
+however, using it is difficult, so it is not used in enough
+places.
+
+### Investigate switching to python-prompt-toolkit
+
+Either switch crmsh over to using the prompt toolkit for
+implementing the interactive mode, or at least look at it
+to see what ideas we can lift.
+
+https://github.com/jonathanslenders/python-prompt-toolkit
+
+### History transition should be able to save the graph to a file
+
+See https://github.com/ClusterLabs/crmsh/issues/98
+
+### Add support for ordering attribute on resource sets
+
+See https://github.com/ClusterLabs/crmsh/issues/84
+
+### Better version detection
+
+Be better at detecting and handling the Pacemaker version.
+Ensure backwards compatibility, for example with old vs.
+new ACL command syntax.
+
+### Syntax highlighting
+
+- syntax highlighting is done before producing output, which
+ is basically wrong and makes code convoluted; it further
+ makes extra processing more difficult
+
+- use a python library (pygments seems to be the best
+ candidate); that should also allow other output formats
+ (not only terminal)
+
+- how to extend pygments to understand a new language? it'd
+ be good to be able to get this _without_ pushing the parser
+ upstream (that would take _long_ to propagate to
+ distributions)
+
+### CibFactory is huge
+
+- this is a single central CIB class, it'd be good to have it
+ split into several smaller classes (how?)
+
+### The element create/update procedure is complex
+
+- not sure how to improve this
+
+### Bad namespace separation
+
+- xmlutil and utils are just loose collections of functions and
+need to be organized better (get rid of 'from xyz import *')
diff --git a/doc/profiles.adoc b/doc/profiles.adoc
new file mode 100644
index 0000000..7201a81
--- /dev/null
+++ b/doc/profiles.adoc
@@ -0,0 +1,47 @@
+=== /etc/crm/profiles.yml
+
+==== Purpose
+
+YAML file `/etc/crm/profiles.yml` contains Corosync, SBD and Pacemaker parameters for different platforms.
+
+crmsh bootstrap detects the system environment and loads the corresponding parameters predefined in this file.
+
+==== Syntax
+
+............
+profile_name:
+ key_name: value
+............
+
+The valid profile names are:
+"microsoft-azure", "google-cloud-platform", "amazon-web-services", "s390", "default"
+
+`key_name` is a known Corosync, SBD, or Pacemaker parameter, such as
+`corosync.totem.token` or `sbd.watchdog_timeout`.
+
+For more details about the parameter definitions, refer to the corosync.conf(5) and sbd(8) man pages.
+
+Example:
+............
+default:
+ corosync.totem.crypto_hash: sha1
+ corosync.totem.crypto_cipher: aes256
+ corosync.totem.token: 5000
+ corosync.totem.join: 60
+ corosync.totem.max_messages: 20
+ corosync.totem.token_retransmits_before_loss_const: 10
+ sbd.watchdog_timeout: 15
+
+microsoft-azure:
+ corosync.totem.token: 30000
+ sbd.watchdog_timeout: 60
+............
+
+==== How the content of the file is interpreted
+
+The profiles have the following properties (see the sketch after the list):
+
+* Profiles are only loaded on bootstrap init node.
+* The "default" profile is loaded in the beginning.
+* Specific profiles will override the corresponding values in the "default" profile (if the specific environment is detected).
+* Users can customize the "default" profile for their needs, for example for on-premise environments that are not yet covered by a predefined profile.
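+
+A minimal sketch of the merge semantics in Python (illustrative
+only, not the actual crmsh code; the detected profile name is made
+up):
+
+............
+import yaml
+
+with open("/etc/crm/profiles.yml") as f:
+    profiles = yaml.safe_load(f) or {}
+
+# start from the defaults, then let the detected profile override
+params = dict(profiles.get("default", {}))
+detected = "microsoft-azure"  # hypothetical detection result
+params.update(profiles.get(detected, {}))
+............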
diff --git a/doc/releasing-a-new-version.md b/doc/releasing-a-new-version.md
new file mode 100644
index 0000000..8141f87
--- /dev/null
+++ b/doc/releasing-a-new-version.md
@@ -0,0 +1,199 @@
+# Releasing a new version
+
+A guide to releasing new versions of crmsh.
+
+## Version scheme
+
+We follow a somewhat loose version of Semantic Versioning, with a
+three-part version number:
+
+ <major>.<minor>.<patch>
+
+The major version number is increased rarely and somewhat
+arbitrarily, and indicates big changes to the shell. Moving from
+Python 2 to Python 3
+was such a change, for example. It does not indicate breaking
+changes: We try not to make breaking changes at all. If there is a
+breaking change, it is hopefully a mistake that would be fixed with a
+patch. If not, it should be noted very clearly and probably only
+released with a major version number change.
+
+The minor version number indicates new features and bugfixes, but
+hopefully no breaking changes.
+
+The patch version number indicates bugfixes only, and no breaking
+changes.
+
+## Steps
+
+1. Updating the changelog
+
+In `/ChangeLog`, there is a curated list of changes included in this
+release. This log should be updated based on the git history. Remove
+any updates that are tagged `dev:` or `test:` since these are internal
+changes, and clean up the changelog in any other way you might want.
+
+To get the list of changes since the last release, you can use `git
+log` with a custom format. This example gets the changes between 3.0.0
+and 3.0.1, filtering out any changes tagged `dev:` or `test:`:
+
+ PAGER=cat git log --format="- %s" 3.0.0..3.0.1 | \
+ grep -Ev -- '-[ ](dev|test):.*'
+
+2. Tagging the release
+
+Using `git tag` you can see the list of existing tags. Depending
+on the version being released, you will want to tag the current commit
+with that release. Make it a signed tag:
+
+ git tag -s -u <user@example.com> 4.1.0
+
+In the tag message I usually just put
+
+ Release 4.1.0
+
+(of course, change `4.1.0` to whatever release it is you are tagging.)
+
+Remember to push the tag to the Github repository. Assuming that the
+Github repository is `origin`, this command should do the trick:
+
+ git push --tags origin
+
+
+3. Release email
+
+Send a release email to users@clusterlabs.org. Here is the template
+that I usually follow with release emails:
+
+```
+Hello everyone!
+
+I'm happy to announce the release of crmsh version <VERSION>.
+
+<DESCRIPTION: some notes about the release>
+
+There are some other changes in this release as well, see the
+ChangeLog for the complete list of changes:
+
+* https://github.com/ClusterLabs/crmsh/blob/<VERSION>/ChangeLog
+
+The source code can be downloaded from Github:
+
+* https://github.com/ClusterLabs/crmsh/releases/tag/<VERSION>
+
+Packaged versions of crmsh should be available shortly from your
+distribution of choice. Development packages for openSUSE Tumbleweed
+are available from the Open Build System, here:
+
+* http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/
+
+Archives of the tagged release:
+
+* https://github.com/ClusterLabs/crmsh/archive/<VERSION>.tar.gz
+* https://github.com/ClusterLabs/crmsh/archive/<VERSION>.zip
+
+As usual, a huge thank you to all contributors and users of crmsh!
+
+```
+
+4. Website update
+
+The crmsh website is hosted on Github as a github page. The URL to the
+website repository is
+
+ https://github.com/crmsh/crmsh.github.io
+
+The website contents themselves (the sources) are found in the regular
+crmsh repository, in the `/doc/website-v1` folder. There is a
+`Makefile` in this folder which can be used to regenerate the
+website.
+
+Doing this requires `asciidoc` and `Pygments` to be installed, as well
+as the custom Pygments filter `ansiclr`.
+
+`ansiclr` is found in the `/contrib/pygments_crmsh_lexers` folder, and
+can be installed by running `python setup.py install` in the
+`/contrib` folder.
+
+**Note: ansiclr seems to be broken at the moment. Just ignore
+it. Everything should still work except some highlighting.**
+
+To create the news update, copy a previous update (found in
+`/doc/website-v1/news`), rename it to an appropriate name based on the
+current date, and replace the contents based on the announcement
+email.
+
+Remember to update the title, author and date information at the top
+of the news entry, to ensure that it appears correctly on the site.
+
+To generate the site including the new entry, run
+
+ make
+
+The new site should now sit in `/doc/website-v1/gen`. To update the
+site, using rsync should work:
+
+ rsync -Pavz doc/website-v1/gen/ <path-to-website-checkout>/crmsh.github.io/
+
+5. Update `network:ha-clustering:Factory`
+
+On the Open Build Service, the project
+`network:ha-clustering:Factory/crmsh` is used as the development
+project for openSUSE Tumbleweed. This project mirrors the state of the
+`master` branch in crmsh, but for policy reasons it is not
+automatically updated.
+
+The following steps assume that you are a maintainer of
+`network:ha-clustering:Factory`. If not, you can still make the update
+but you will have to branch the `crmsh` package, make the update there
+and then submit an update request using `osc submit`. Then a
+maintainer will have to review your submission.
+
+To update the package and submit to `openSUSE:Factory`, the following
+steps will do the trick. First, check out a local copy of the crmsh
+project:
+
+ osc co network:ha-clustering:Factory crmsh
+ cd network:ha-clustering:Factory/crmsh
+
+If you already have a copy, make sure it is up to date:
+
+ osc update
+
+Update the `_service` file so that the version number reflects the
+latest version of the `master` branch in git.
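+
+A hypothetical `_service` fragment of this kind (the actual file may
+use different services and parameters):
+
+```
+<services>
+  <service name="tar_scm">
+    <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
+    <param name="version">4.1.0</param>
+  </service>
+</services>
+```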
+
+Pull in the latest changes from git:
+
+ osc service dr
+
+This will update the spec file and the changes file. Clean up the
+changes file if you want (not strictly necessary), remove any old
+tarball still in the package directory, and then register the added
+and removed files with osc:
+
+ osc ar
+
+Check that everything looks good, and then commit:
+
+ osc diff
+ osc commit
+
+Once the package has built successfully on OBS, you can submit to
+`openSUSE:Factory`:
+
+ osc submit
+
+6. Update `network:ha-clustering:Stable`
+
+If this is a minor release for the latest major release, or a new
+major release, we should update the version of crmsh hosted at
+`network:ha-clustering:Stable` on OBS.
+
+Since `master` is probably up to date with this latest version, doing
+so should be as simple as submitting crmsh from
+`network:ha-clustering:Factory` to `network:ha-clustering:Stable`.
+
+Once the release is tagged, the announcement email is sent, the
+website updated and the packages updated, the release is done.
+
+Congratulations!
diff --git a/doc/sort-doc.py b/doc/sort-doc.py
new file mode 100644
index 0000000..87c35a2
--- /dev/null
+++ b/doc/sort-doc.py
@@ -0,0 +1,82 @@
+# Tool to sort the documentation alphabetically
+# Makes a lot of assumptions about the structure of the document it edits
+# Looks for special markers that indicate structure
+
+# prints output to stdout
+
+# print lines until in a cmdhelp_<section>
+# collect all cmdhelp_<section>_<subsection> subsections
+# sort and print
+
+import sys
+import re
+
+
+class Sorter(object):
+ def __init__(self):
+ self.current_section = None
+ self.current_subsection = None
+ self.subsections = []
+ self.re_section = re.compile(r'^\[\[cmdhelp_([^_,]+),')
+ self.re_subsection = re.compile(r'^\[\[cmdhelp_([^_]+)_([^,]+),')
+
+ def beginsection(self, line):
+ m = self.re_section.match(line)
+ name = m.group(1)
+ self.current_section = [name, line]
+ self.current_subsection = None
+ self.subsections = []
+ return self.insection
+
+ def insection(self, line):
+ if line.startswith('[[cmdhelp_%s_' % (self.current_section[0])):
+ return self.beginsubsection(line)
+ elif line.startswith('[['):
+ self.finishsection()
+ return self.preprint(line)
+ else:
+ self.current_section[1] += line
+ return self.insection
+
+ def beginsubsection(self, line):
+ m = self.re_subsection.match(line)
+ name = m.group(2)
+ self.current_subsection = [name, line]
+ return self.insubsection
+
+ def insubsection(self, line):
+ if line.startswith('[['):
+ self.subsections.append(self.current_subsection)
+ self.current_subsection = None
+ return self.insection(line)
+ self.current_subsection[1] += line
+ return self.insubsection
+
+ def finishsection(self):
+ if self.current_section:
+ print self.current_section[1],
+ for name, text in sorted(self.subsections, key=lambda x: x[0]):
+ print text,
+ self.current_section = None
+ self.subsections = []
+
+ def preprint(self, line):
+ if self.re_section.match(line):
+ return self.beginsection(line)
+ print line,
+ return self.preprint
+
+    def run(self, lines):
+        # A small state machine: each handler consumes one line and
+        # returns the handler for the next line.
+        action = self.preprint
+ for line in lines:
+ prevaction = action
+ action = action(line)
+ if action is None:
+ print prevaction
+ print self.current_section
+ print self.current_subsection
+ sys.exit(1)
+ if self.current_section:
+ self.finishsection()
+
+Sorter().run(open(sys.argv[1]).readlines())
diff --git a/doc/website-v1/404.adoc b/doc/website-v1/404.adoc
new file mode 100644
index 0000000..926d803
--- /dev/null
+++ b/doc/website-v1/404.adoc
@@ -0,0 +1,9 @@
+404: Page not found
+===================
+
+Apologies, but there is nothing here!
+
+The page you are looking for may have moved.
+
+* link:/documentation[Documentation]
+* link:/faq[Frequently Asked Questions]
diff --git a/doc/website-v1/Makefile b/doc/website-v1/Makefile
new file mode 100644
index 0000000..8ea2f46
--- /dev/null
+++ b/doc/website-v1/Makefile
@@ -0,0 +1,145 @@
+ASCIIDOC := asciidoc
+CRMCONF := crm.conf
+SRC := faq.adoc documentation.adoc development.adoc installation.adoc \
+ configuration.adoc about.adoc rsctest-guide.adoc download.adoc \
+ history-guide.adoc start-guide.adoc man-1.2.adoc scripts.adoc man-2.0.adoc man-3.adoc man-4.3.adoc
+HISTORY_LISTINGS = include/history-guide/nfs-probe-err.typescript \
+ include/history-guide/sample-cluster.conf.crm \
+ include/history-guide/status-probe-fail.typescript \
+ include/history-guide/resource-trace.typescript \
+ include/history-guide/stonith-corosync-stopped.typescript \
+ include/history-guide/basic-transition.typescript \
+ include/history-guide/diff.typescript \
+ include/history-guide/info.typescript \
+ include/history-guide/resource.typescript \
+ include/history-guide/transition-log.typescript
+TGT := $(patsubst %.adoc,gen/%/index.html,$(SRC))
+CSS := css/crm.css css/font-awesome.min.css
+CSS := $(patsubst %,gen/%,$(CSS))
+ICONS := \
+ img/icons/caution.png \
+ img/icons/example.png \
+ img/icons/home.png \
+ img/icons/important.png \
+ img/icons/next.png \
+ img/icons/note.png \
+ img/icons/prev.png \
+ img/icons/tip.png \
+ img/icons/up.png \
+ img/icons/warning.png \
+ img/icons/callouts/10.png \
+ img/icons/callouts/11.png \
+ img/icons/callouts/12.png \
+ img/icons/callouts/13.png \
+ img/icons/callouts/14.png \
+ img/icons/callouts/15.png \
+ img/icons/callouts/1.png \
+ img/icons/callouts/2.png \
+ img/icons/callouts/3.png \
+ img/icons/callouts/4.png \
+ img/icons/callouts/5.png \
+ img/icons/callouts/6.png \
+ img/icons/callouts/7.png \
+ img/icons/callouts/8.png \
+ img/icons/callouts/9.png
+IMG := $(ICONS) img/loader.gif img/laptop.png img/servers.gif \
+ img/history-guide/sample-cluster.conf.png \
+ img/history-guide/smallapache-start.png
+IMG := $(patsubst %,gen/%,$(IMG))
+FONTS := fonts/FontAwesome.otf fonts/fontawesome-webfont.eot \
+ fonts/fontawesome-webfont.svg fonts/fontawesome-webfont.ttf \
+ fonts/fontawesome-webfont.woff
+FONTS := $(patsubst %,gen/%,$(FONTS))
+WATCHDIR := watchdir
+XDGOPEN := xdg-open
+NEWS := $(wildcard news/*.adoc)
+NEWSDOC := $(patsubst %.adoc,gen/%/index.html,$(NEWS))
+
+.PHONY: all clean deploy open
+
+all: site
+
+gen/index.html: index.adoc $(CRMCONF)
+ @mkdir -p $(dir $@)
+ @$(ASCIIDOC) --unsafe -b html5 -a icons -a iconsdir=/img/icons -f $(CRMCONF) -o $@ $<
+ @python ./postprocess.py -o $@ $<
+
+gen/%/index.html: %.adoc $(CRMCONF)
+ @mkdir -p $(dir $@)
+ @$(ASCIIDOC) --unsafe -b html5 -a icons -a iconsdir=/img/icons -f $(CRMCONF) -o $@ $<
+ @python ./postprocess.py -o $@ $<
+
+gen/history-guide/index.html: $(HISTORY_LISTINGS)
+
+gen/man/index.html: ../crm.8.adoc $(CRMCONF)
+ @mkdir -p $(dir $@)
+ @$(ASCIIDOC) --unsafe -b html5 -f $(CRMCONF) -o $@ $<
+ @python ./postprocess.py -o $@ $<
+
+gen/404.html: 404.adoc $(CRMCONF)
+ @mkdir -p $(dir $@)
+ @$(ASCIIDOC) --unsafe -b html5 -f $(CRMCONF) -o $@ $<
+ @python ./postprocess.py -o $@ $<
+
+news.adoc: $(NEWS) $(CRMCONF)
+ @echo "news:" $(NEWS)
+ python ./make-news.py $@ $(NEWS)
+
+gen/news/index.html: news.adoc
+ @mkdir -p $(dir $@)
+ $(ASCIIDOC) --unsafe -b html5 -f $(CRMCONF) -o $@ $<
+ @python ./postprocess.py -o $@ $<
+
+gen/css/%.css: css/%.css
+ @mkdir -p gen/css
+ @cp -r $< $@
+ @echo "+ $@"
+
+gen/js/%.js: js/%.js
+ @mkdir -p gen/js
+ @cp -r $< $@
+ @echo "+ $@"
+
+gen/img/icons/callouts/%: img/icons/callouts/%
+ @mkdir -p gen/img/icons/callouts
+ @cp -r $< $@
+ @echo "+ $@"
+
+gen/img/icons/%: img/icons/%
+ @mkdir -p gen/img/icons
+ @cp -r $< $@
+ @echo "+ $@"
+
+gen/img/history-guide/%: img/history-guide/%
+ @mkdir -p gen/img/history-guide
+ @cp -r $< $@
+ @echo "+ $@"
+
+gen/img/%: img/%
+ @mkdir -p gen/img
+ @cp -r $< $@
+ @echo "+ $@"
+
+gen/fonts/%: fonts/%
+ @mkdir -p gen/fonts
+ @cp -r $< $@
+ @echo "+ $@"
+
+gen/atom.xml: $(NEWSDOC)
+ @echo "atom:" $(NEWSDOC)
+ python ./make-news.py gen/atom.xml $(NEWS)
+
+site: gen/atom.xml gen/index.html gen/404.html gen/news/index.html gen/man/index.html $(TGT) $(CSS) $(IMG) $(FONTS) $(NEWSDOC)
+	@which dos2unix >/dev/null && find gen -name "*.html" -type f -exec dos2unix {} \; || true
+
+deploy: site
+ @echo "TODO: CVS upload"
+
+open: site
+ @$(XDGOPEN) gen/index.html
+
+watch:
+ @$(WATCHDIR) --verbose --cmd "make" . css img fonts
+
+clean:
+ -@$(RM) -rf gen/* news.adoc
diff --git a/doc/website-v1/about.adoc b/doc/website-v1/about.adoc
new file mode 100644
index 0000000..2656625
--- /dev/null
+++ b/doc/website-v1/about.adoc
@@ -0,0 +1,19 @@
+= About =
+
+== Authors ==
+
+include::../../AUTHORS[]
+
+== Site ==
+
+This site was generated from http://asciidoc.org[AsciiDoc] sources.
+
+The CSS for this site started as a clone of the +bare+ theme by https://github.com/rtomayko/adoc-themes[Ryan Tomayko].
+
+Fonts used are https://www.google.com/fonts/specimen/Open+Sans[Open Sans] and http://fontawesome.io[Font Awesome].
+
+== License ==
+
+`crmsh` is licensed under the GNU General Public License (GPL).
+
+For more information, see https://gnu.org/licenses/gpl.html
diff --git a/doc/website-v1/configuration.adoc b/doc/website-v1/configuration.adoc
new file mode 100644
index 0000000..fb48c93
--- /dev/null
+++ b/doc/website-v1/configuration.adoc
@@ -0,0 +1,132 @@
+= Configuration =
+
+.Version information
+NOTE: This section applies to `crmsh 2.0+` only.
+
+
+`crm` can be configured using both a system-wide configuration file,
+and a per-user configuration file. The values set in the user-local
+file take precedence over the system-wide settings.
+
+The global configuration file is usually installed at
+`/etc/crm/crm.conf`, and the user-local configuration file at
+`~/.config/crm/crm.conf`.
+
+
+== Upgrading from crm 1.x to 2.x ==
+
+The configuration file format and location changed significantly going
+from crm 1.x to 2.x. If `crm` cannot find a user-local configuration
+file when starting up, it will look for an old-style configuration
+file at `~/.crm.rc`. If this file exists, `crm` will prompt the user
+asking if the old-style configuration should be automatically
+converted to a new-style configuration file.
+
+
+== Format description ==
+
+The configuration file consists of sections, each introduced by a
+`[section]` header and followed by `name=value` pairs.
+
+Leading whitespace is stripped from values.
+
+Values can contain format strings referring to other values in the
+same section.
+
+Lines starting with `#` or `;` are interpreted as comments.
+
+Values starting with `$` are interpreted as environment variable
+references, and the value will be retrieved from the named environment
+variable if set.
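+
+For instance, a fragment exercising these rules might look like the
+following (the `%(editor)s` reference assumes Python's +ConfigParser+
+interpolation syntax and is only illustrative):
+
+----
+# lines starting with # or ; are comments
+[core]
+; taken from the environment if EDITOR is set
+editor = $EDITOR
+; hypothetical reference to another value in the same section
+pager = %(editor)s
+----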
+
+== Example configuration ==
+
+The example configuration below lists all available options and their
+default values.
+
+----------------------
+[core]
+editor = $EDITOR
+pager = $PAGER
+user =
+skill_level = expert
+sort_elements = yes
+check_frequency = always
+check_mode = strict
+wait = no
+add_quotes = yes
+manage_children = ask
+force = no
+debug = no
+ptest = ptest, crm_simulate
+dotty = dotty
+dot = dot
+
+[path]
+sharedir = /usr/share/crmsh
+cache = /var/cache/crm
+crm_config = /var/lib/pacemaker/cib
+crm_daemon_dir = /usr/lib64/pacemaker
+crm_daemon_user = hacluster
+ocf_root = /usr/lib/ocf
+crm_dtd_dir = /usr/share/pacemaker
+pe_state_dir = /var/lib/pacemaker/pengine
+heartbeat_dir = /var/lib/heartbeat
+hb_delnode = /usr/share/heartbeat/hb_delnode
+nagios_plugins = /usr/lib64/nagios/plugins
+
+[color]
+style = color
+error = red bold
+ok = green bold
+warn = yellow bold
+info = cyan
+help_keyword = blue bold underline
+help_header = normal bold
+help_topic = yellow bold
+help_block = cyan
+keyword = yellow
+identifier = normal
+attr_name = cyan
+attr_value = red
+resource_reference = green
+id_reference = green
+score = magenta
+ticket = magenta
+----------------------
+
+
+== Loading and saving options ==
+
+Options are loaded from the global configuration file first, and the
+user-local file second. This means that the user-local options take
+precedence over the global configuration.
+
+When changing an option using the `options` sublevel, the
+configuration file is written to disk with the new value.
+
+== Syntax highlighting ==
+
+By default, `crm` will try to syntax highlight its output when
+connected to a TTY. To disable this behavior, set the configuration
+value `style = none` in the `[color]` section.
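+
+That is:
+
+----
+[color]
+style = none
+----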
+
+The available color choices may depend on the terminal used, but
+normally include the following:
+
+----
+black blue green cyan red magenta yellow white
+----
+
+Colors can be combined with styles:
+
+----
+bold blink dim reverse underline normal
+----
+
+== Setting options from the interactive shell ==
+
+Options can be set directly from the interactive shell using the
+`options` sublevel. These options will be written to the per-user
+configuration file. Note that changing an option in this way may erase
+comments added to the configuration file.
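+
+For example (the editor value chosen here is just an illustration):
+
+----
+crm(live)# options
+crm(live)options# editor vim
+----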
diff --git a/doc/website-v1/crm.conf b/doc/website-v1/crm.conf
new file mode 100644
index 0000000..d1502cf
--- /dev/null
+++ b/doc/website-v1/crm.conf
@@ -0,0 +1,601 @@
+#
+# html5.conf
+#
+# Asciidoc configuration file.
+# html5 backend.
+#
+
+[miscellaneous]
+outfilesuffix=.html
+
+[attributes]
+basebackend=html
+basebackend-html=
+basebackend-html5=
+[replacements2]
+# Line break.
+(?m)^(.*)\s\+$=\1<br>
+
+[replacements]
+ifdef::asciidoc7compatible[]
+# Superscripts.
+\^(.+?)\^=<sup>\1</sup>
+# Subscripts.
+~(.+?)~=<sub>\1</sub>
+endif::asciidoc7compatible[]
+
+[ruler-blockmacro]
+<hr>
+
+[pagebreak-blockmacro]
+<div style="page-break-after:always"></div>
+
+[blockdef-pass]
+asciimath-style=template="asciimathblock",subs=()
+latexmath-style=template="latexmathblock",subs=()
+
+[macros]
+(?u)^(?P<name>audio|video)::(?P<target>\S*?)(\[(?P<attrlist>.*?)\])$=#
+# math macros.
+# Special characters are escaped in HTML math markup.
+(?su)[\\]?(?P<name>asciimath|latexmath):(?P<subslist>\S*?)\[(?P<passtext>.*?)(?<!\\)\]=[specialcharacters]
+(?u)^(?P<name>asciimath|latexmath)::(?P<subslist>\S*?)(\[(?P<passtext>.*?)\])$=#[specialcharacters]
+
+[asciimath-inlinemacro]
+`{passtext}`
+
+[asciimath-blockmacro]
+<div class="mathblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="content">
+<div class="title">{title}</div>
+`{passtext}`
+</div></div>
+
+[asciimathblock]
+<div class="mathblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="content">
+<div class="title">{title}</div>
+`|`
+</div></div>
+
+[latexmath-inlinemacro]
+{passtext}
+
+[latexmath-blockmacro]
+<div class="mathblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="content">
+<div class="title">{title}</div>
+{passtext}
+</div></div>
+
+[latexmathblock]
+<div class="mathblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="content">
+<div class="title">{title}</div>
+|
+</div></div>
+
+[image-inlinemacro]
+<span class="image{role? {role}}">
+<a class="image" href="{link}">
+{data-uri%}<img src="{imagesdir=}{imagesdir?/}{target}" alt="{alt={target}}"{width? width="{width}"}{height? height="{height}"}{title? title="{title}"}>
+{data-uri#}<img alt="{alt={target}}"{width? width="{width}"}{height? height="{height}"}{title? title="{title}"}
+{data-uri#}{sys:"{python}" -u -c "import mimetypes,base64,sys; print 'src=\"data:'+mimetypes.guess_type(r'{target}')[0]+';base64,'; base64.encode(sys.stdin,sys.stdout)" < "{eval:os.path.join(r"{indir={outdir}}",r"{imagesdir=}",r"{target}")}"}">
+{link#}</a>
+</span>
+
+[image-blockmacro]
+<div class="imageblock{style? {style}}{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}{align? style="text-align:{align};"}{float? style="float:{float};"}>
+<div class="content">
+<a class="image" href="{link}">
+{data-uri%}<img src="{imagesdir=}{imagesdir?/}{target}" alt="{alt={target}}"{width? width="{width}"}{height? height="{height}"}>
+{data-uri#}<img alt="{alt={target}}"{width? width="{width}"}{height? height="{height}"}
+{data-uri#}{sys:"{python}" -u -c "import mimetypes,base64,sys; print 'src=\"data:'+mimetypes.guess_type(r'{target}')[0]+';base64,'; base64.encode(sys.stdin,sys.stdout)" < "{eval:os.path.join(r"{indir={outdir}}",r"{imagesdir=}",r"{target}")}"}">
+{link#}</a>
+</div>
+<div class="title">{caption={figure-caption} {counter:figure-number}. }{title}</div>
+</div>
+
+[audio-blockmacro]
+<div class="audioblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="title">{caption=}{title}</div>
+<div class="content">
+<audio src="{imagesdir=}{imagesdir?/}{target}"{autoplay-option? autoplay}{nocontrols-option! controls}{loop-option? loop}>
+Your browser does not support the audio tag.
+</audio>
+</div></div>
+
+[video-blockmacro]
+<div class="videoblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="title">{caption=}{title}</div>
+<div class="content">
+<video src="{imagesdir=}{imagesdir?/}{target}"{width? width="{width}"}{height? height="{height}"}{poster? poster="{poster}"}{autoplay-option? autoplay}{nocontrols-option! controls}{loop-option? loop}>
+Your browser does not support the video tag.
+</video>
+</div></div>
+
+[unfloat-blockmacro]
+<div style="clear:both;"></div>
+
+[toc-blockmacro]
+template::[toc]
+
+[indexterm-inlinemacro]
+# Index term.
+{empty}
+
+[indexterm2-inlinemacro]
+# Index term.
+# Single entry index term that is visible in the primary text flow.
+{1}
+
+[footnote-inlinemacro]
+# footnote:[<text>].
+<span class="footnote"><br>[{0}]<br></span>
+
+[footnoteref-inlinemacro]
+# footnoteref:[<id>], create reference to footnote.
+{2%}<span class="footnoteref"><br><a href="#_footnote_{1}">[{1}]</a><br></span>
+# footnoteref:[<id>,<text>], create footnote with ID.
+{2#}<span class="footnote" id="_footnote_{1}"><br>[{2}]<br></span>
+
+[callout-inlinemacro]
+ifndef::icons[]
+<b>&lt;{index}&gt;</b>
+endif::icons[]
+ifdef::icons[]
+ifndef::data-uri[]
+<img src="{icon={iconsdir}/callouts/{index}.png}" alt="{index}">
+endif::data-uri[]
+ifdef::data-uri[]
+<img alt="{index}" src="data:image/png;base64,
+{sys:"{python}" -u -c "import base64,sys; base64.encode(sys.stdin,sys.stdout)" < "{eval:os.path.join(r"{indir={outdir}}",r"{icon={iconsdir}/callouts/{index}.png}")}"}">
+endif::data-uri[]
+endif::icons[]
+
+# Comment line macros.
+[comment-inlinemacro]
+{showcomments#}<br><span class="comment">{passtext}</span><br>
+
+[comment-blockmacro]
+{showcomments#}<p><span class="comment">{passtext}</span></p>
+
+[literal-inlinemacro]
+# Inline literal.
+<span class="monospaced">{passtext}</span>
+
+# List tags.
+[listtags-bulleted]
+list=<div class="ulist{style? {style}}{compact-option? compact}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<ul>|</ul></div>
+item=<li>|</li>
+text=<p>|</p>
+
+[listtags-numbered]
+# The start attribute is not valid XHTML 1.1 but all browsers support it.
+list=<div class="olist{style? {style}}{compact-option? compact}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<ol class="{style}"{start? start="{start}"}>|</ol></div>
+item=<li>|</li>
+text=<p>|</p>
+
+[listtags-labeled]
+list=<div class="dlist{compact-option? compact}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<dl>|</dl></div>
+entry=
+label=
+term=<dt class="hdlist1{strong-option? strong}">|</dt>
+item=<dd>|</dd>
+text=<p>|</p>
+
+[listtags-horizontal]
+list=<div class="hdlist{compact-option? compact}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<table>{labelwidth?<col width="{labelwidth}%">}{itemwidth?<col width="{itemwidth}%">}|</table></div>
+label=<td class="hdlist1{strong-option? strong}">|</td>
+term=|<br>
+entry=<tr>|</tr>
+item=<td class="hdlist2">|</td>
+text=<p style="margin-top: 0;">|</p>
+
+[listtags-qanda]
+list=<div class="qlist{style? {style}}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<ol>|</ol></div>
+entry=<li>|</li>
+label=
+term=<p><em>|</em></p>
+item=
+text=<p>|</p>
+
+[listtags-callout]
+ifndef::icons[]
+list=<div class="colist{style? {style}}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<ol>|</ol></div>
+item=<li>|</li>
+text=<p>|</p>
+endif::icons[]
+ifdef::icons[]
+list=<div class="colist{style? {style}}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<table>|</table></div>
+ifndef::data-uri[]
+item=<tr><td><img src="{iconsdir}/callouts/{listindex}.png" alt="{listindex}"></td><td>|</td></tr>
+endif::data-uri[]
+ifdef::data-uri[]
+item=<tr><td><img alt="{listindex}" src="data:image/png;base64, {sys:"{python}" -u -c "import base64,sys; base64.encode(sys.stdin,sys.stdout)" < "{eval:os.path.join(r"{indir={outdir}}",r"{icon={iconsdir}/callouts/{listindex}.png}")}"}"></td><td>|</td></tr>
+endif::data-uri[]
+text=|
+endif::icons[]
+
+[listtags-glossary]
+list=<div class="dlist{style? {style}}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<dl>|</dl></div>
+label=
+entry=
+term=<dt>|</dt>
+item=<dd>|</dd>
+text=<p>|</p>
+
+[listtags-bibliography]
+list=<div class="ulist{style? {style}}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<ul>|</ul></div>
+item=<li>|</li>
+text=<p>|</p>
+
+[tags]
+# Quoted text.
+emphasis=<em>{1?<span class="{1}">}|{1?</span>}</em>
+strong=<strong>{1?<span class="{1}">}|{1?</span>}</strong>
+monospaced=<span class="monospaced{1? {1}}">|</span>
+singlequoted={lsquo}{1?<span class="{1}">}|{1?</span>}{rsquo}
+doublequoted={ldquo}{1?<span class="{1}">}|{1?</span>}{rdquo}
+unquoted={1?<span class="{1}">}|{1?</span>}
+superscript=<sup>{1?<span class="{1}">}|{1?</span>}</sup>
+subscript=<sub>{1?<span class="{1}">}|{1?</span>}</sub>
+
+ifdef::deprecated-quotes[]
+# Override with deprecated quote attributes.
+emphasis={role?<span class="{role}">}<em{1,2,3? style="}{1?color:{1};}{2?background-color:{2};}{3?font-size:{3}em;}{1,2,3?"}>|</em>{role?</span>}
+strong={role?<span class="{role}">}<strong{1,2,3? style="}{1?color:{1};}{2?background-color:{2};}{3?font-size:{3}em;}{1,2,3?"}>|</strong>{role?</span>}
+monospaced=<span class="monospaced{role? {role}}"{1,2,3? style="}{1?color:{1};}{2?background-color:{2};}{3?font-size:{3}em;}{1,2,3?"}>|</span>
+singlequoted={role?<span class="{role}">}{1,2,3?<span style="}{1?color:{1};}{2?background-color:{2};}{3?font-size:{3}em;}{1,2,3?">}{amp}#8216;|{amp}#8217;{1,2,3?</span>}{role?</span>}
+doublequoted={role?<span class="{role}">}{1,2,3?<span style="}{1?color:{1};}{2?background-color:{2};}{3?font-size:{3}em;}{1,2,3?">}{amp}#8220;|{amp}#8221;{1,2,3?</span>}{role?</span>}
+unquoted={role?<span class="{role}">}{1,2,3?<span style="{1?color:{1};}{2?background-color:{2};}{3?font-size:{3}em;}">}|{1,2,3?</span>}{role?</span>}
+superscript={role?<span class="{role}">}<sup{1,2,3? style="}{1?color:{1};}{2?background-color:{2};}{3?font-size:{3}em;}{1,2,3?"}>|</sup>{role?</span>}
+subscript={role?<span class="{role}">}<sub{1,2,3? style="}{1?color:{1};}{2?background-color:{2};}{3?font-size:{3}em;}{1,2,3?"}>|</sub>{role?</span>}
+endif::deprecated-quotes[]
+
+# Inline macros
+[http-inlinemacro]
+<a href="{name}:{target}">{0={name}:{target}}</a>
+[https-inlinemacro]
+<a href="{name}:{target}">{0={name}:{target}}</a>
+[ftp-inlinemacro]
+<a href="{name}:{target}">{0={name}:{target}}</a>
+[file-inlinemacro]
+<a href="{name}:{target}">{0={name}:{target}}</a>
+[irc-inlinemacro]
+<a href="{name}:{target}">{0={name}:{target}}</a>
+[mailto-inlinemacro]
+<a href="mailto:{target}">{0={target}}</a>
+[link-inlinemacro]
+<a href="{target}">{0={target}}</a>
+[callto-inlinemacro]
+<a href="{name}:{target}">{0={target}}</a>
+# anchor:id[text]
+[anchor-inlinemacro]
+<a id="{target}"></a>
+# [[id,text]]
+[anchor2-inlinemacro]
+<a id="{1}"></a>
+# [[[id]]]
+[anchor3-inlinemacro]
+<a id="{1}"></a>[{1}]
+# xref:id[text]
+[xref-inlinemacro]
+<a href="#{target}">{0=[{target}]}</a>
+# <<id,text>>
+[xref2-inlinemacro]
+<a href="#{1}">{2=[{1}]}</a>
+
+# Special word substitution.
+[emphasizedwords]
+<em>{words}</em>
+[monospacedwords]
+<span class="monospaced">{words}</span>
+[strongwords]
+<strong>{words}</strong>
+
+# Paragraph substitution.
+[paragraph]
+<div class="paragraph{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<p>
+|
+</p></div>
+
+[admonitionparagraph]
+template::[admonitionblock]
+
+# Delimited blocks.
+[listingblock]
+<div class="listingblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="title">{caption=}{title}</div>
+<div class="content monospaced">
+<pre>
+|
+</pre>
+</div></div>
+
+[literalblock]
+<div class="literalblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="title">{title}</div>
+<div class="content monospaced">
+<pre>
+|
+</pre>
+</div></div>
+
+[sidebarblock]
+<div class="sidebarblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="content">
+<div class="title">{title}</div>
+|
+</div></div>
+
+[openblock]
+<div class="openblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="title">{title}</div>
+<div class="content">
+|
+</div></div>
+
+[partintroblock]
+template::[openblock]
+
+[abstractblock]
+template::[quoteblock]
+
+[quoteblock]
+<div class="quoteblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="title">{title}</div>
+<div class="content">
+|
+</div>
+<div class="attribution">
+<em>{citetitle}</em>{attribution?<br>}
+&#8212; {attribution}
+</div></div>
+
+[verseblock]
+<div class="verseblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="title">{title}</div>
+<pre class="content">
+|
+</pre>
+<div class="attribution">
+<em>{citetitle}</em>{attribution?<br>}
+&#8212; {attribution}
+</div></div>
+
+[exampleblock]
+<div class="exampleblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="title">{caption={example-caption} {counter:example-number}. }{title}</div>
+<div class="content">
+|
+</div></div>
+
+[admonitionblock]
+<div class="admonitionblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<table><tr>
+<td class="icon">
+{data-uri%}{icons#}<img src="{icon={iconsdir}/{name}.png}" alt="{caption}">
+{data-uri#}{icons#}<img alt="{caption}" src="data:image/png;base64,
+{data-uri#}{icons#}{sys:"{python}" -u -c "import base64,sys; base64.encode(sys.stdin,sys.stdout)" < "{eval:os.path.join(r"{indir={outdir}}",r"{icon={iconsdir}/{name}.png}")}"}">
+{icons%}<div class="title">{caption}</div>
+</td>
+<td class="content">
+<div class="title">{title}</div>
+|
+</td>
+</tr></table>
+</div>
+
+# Tables.
+[tabletags-default]
+colspec=<col{autowidth-option! style="width:{colpcwidth}%;"}>
+bodyrow=<tr>|</tr>
+headdata=<th class="tableblock halign-{halign=left} valign-{valign=top}" {colspan@1::colspan="{colspan}" }{rowspan@1::rowspan="{rowspan}" }>|</th>
+bodydata=<td class="tableblock halign-{halign=left} valign-{valign=top}" {colspan@1::colspan="{colspan}" }{rowspan@1::rowspan="{rowspan}" }>|</td>
+paragraph=<p class="tableblock">|</p>
+
+[tabletags-header]
+paragraph=<p class="tableblock header">|</p>
+
+[tabletags-emphasis]
+paragraph=<p class="tableblock"><em>|</em></p>
+
+[tabletags-strong]
+paragraph=<p class="tableblock"><strong>|</strong></p>
+
+[tabletags-monospaced]
+paragraph=<p class="tableblock monospaced">|</p>
+
+[tabletags-verse]
+bodydata=<td class="tableblock halign-{halign=left} valign-{valign=top}" {colspan@1::colspan="{colspan}" }{rowspan@1::rowspan="{rowspan}" }><div class="verse">|</div></td>
+paragraph=
+
+[tabletags-literal]
+bodydata=<td class="tableblock halign-{halign=left} valign-{valign=top}" {colspan@1::colspan="{colspan}" }{rowspan@1::rowspan="{rowspan}" }><div class="literal monospaced"><pre>|</pre></div></td>
+paragraph=
+
+[tabletags-asciidoc]
+bodydata=<td class="tableblock halign-{halign=left} valign-{valign=top}" {colspan@1::colspan="{colspan}" }{rowspan@1::rowspan="{rowspan}" }><div>|</div></td>
+paragraph=
+
+[table]
+<table class="tableblock frame-{frame=all} grid-{grid=all}{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}
+style="
+margin-left:{align@left:0}{align@center|right:auto}; margin-right:{align@left|center:auto}{align@right:0};
+float:{float};
+{autowidth-option%}width:{tablepcwidth}%;
+{autowidth-option#}{width#style=width:{tablepcwidth}%;}
+">
+<caption class="title">{caption={table-caption} {counter:table-number}. }{title}</caption>
+{colspecs}
+{headrows#}<thead>
+{headrows}
+{headrows#}</thead>
+{footrows#}<tfoot>
+{footrows}
+{footrows#}</tfoot>
+<tbody>
+{bodyrows}
+</tbody>
+</table>
+
+#--------------------------------------------------------------------
+# Deprecated old table definitions.
+#
+
+[miscellaneous]
+# Screen width in pixels.
+pagewidth=800
+pageunits=px
+
+[old_tabledef-default]
+template=old_table
+colspec=<col style="width:{colwidth}{pageunits};" />
+bodyrow=<tr>|</tr>
+headdata=<th class="tableblock halign-{colalign=left}">|</th>
+footdata=<td class="tableblock halign-{colalign=left}">|</td>
+bodydata=<td class="tableblock halign-{colalign=left}">|</td>
+
+[old_table]
+<table class="tableblock frame-{frame=all} grid-{grid=all}"{id? id="{id}"}>
+<caption class="title">{caption={table-caption}}{title}</caption>
+{colspecs}
+{headrows#}<thead>
+{headrows}
+{headrows#}</thead>
+{footrows#}<tfoot>
+{footrows}
+{footrows#}</tfoot>
+<tbody style="vertical-align:top;">
+{bodyrows}
+</tbody>
+</table>
+
+# End of deprecated old table definitions.
+#--------------------------------------------------------------------
+
+[floatingtitle]
+<h{level@0:1}{level@1:2}{level@2:3}{level@3:4}{level@4:5}{id? id="{id}"} class="float">{title}</h{level@0:1}{level@1:2}{level@2:3}{level@3:4}{level@4:5}>
+
+[preamble]
+# Untitled elements between header and first section title.
+<div id="preamble">
+<div class="sectionbody">
+|
+</div>
+</div>
+
+# Document sections.
+[sect0]
+<h1{id? id="{id}"}>{title}</h1>
+|
+
+[sect1]
+<div class="sect1{style? {style}}{role? {role}}">
+<h2{id? id="{id}"}>{numbered?{sectnum} }{title}</h2>
+<div class="sectionbody">
+|
+</div>
+</div>
+
+[sect2]
+<div class="sect2{style? {style}}{role? {role}}">
+<h3{id? id="{id}"}>{numbered?{sectnum} }{title}</h3>
+|
+</div>
+
+[sect3]
+<div class="sect3{style? {style}}{role? {role}}">
+<h4{id? id="{id}"}>{numbered?{sectnum} }{title}</h4>
+|
+</div>
+
+[sect4]
+<div class="sect4{style? {style}}{role? {role}}">
+<h5{id? id="{id}"}>{title}</h5>
+|
+</div>
+
+[appendix]
+<div class="sect1{style? {style}}{role? {role}}">
+<h2{id? id="{id}"}>{numbered?{sectnum} }{appendix-caption} {counter:appendix-number:A}: {title}</h2>
+<div class="sectionbody">
+|
+</div>
+</div>
+
+[toc]
+<div id="toc">
+ <div id="toctitle">{toc-title}</div>
+ <noscript><p><b>JavaScript must be enabled in your browser to display the table of contents.</b></p></noscript>
+</div>
+
+[header]
+<!DOCTYPE html>
+<html lang="{lang=en}">
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset={encoding}">
+<meta name="generator" content="AsciiDoc {asciidoc-version}">
+<meta name="description" content="{description}">
+<meta name="keywords" content="{keywords}">
+<title>crmsh - {title}</title>
+{title%}<title>crmsh - {doctitle=}</title>
+<link rel="stylesheet" href="/css/font-awesome.min.css">
+<link rel="stylesheet" href="/css/crm.css" type="text/css">
+<link href='//fonts.googleapis.com/css?family=Open+Sans:400,700' rel='stylesheet' type='text/css'>
+<link href="/atom.xml" type="application/atom+xml" rel="alternate" title="crmsh atom feed">
+</head>
+<body>
+<div id="header">
+<h1>
+<a href="/"><span class="fa-stack">
+ <i class="fa fa-square fa-stack-2x"></i>
+ <i class="fa fa-terminal fa-stack-1x fa-inverse"></i>
+</span>crmsh</a>
+</h1>
+
+<div id="topbar-small">
+<ul>
+<li><a href="/news" title="News"><i class="fa fa-rss fa-2x"></i></a></li>
+<li><a href="/documentation" title="Documentation"><i class="fa fa-book fa-2x"></i></a></li>
+<li><a href="/download" title="Download"><i class="fa fa-download fa-2x"></i></a></li>
+<li><a href="/development" title="Development"><i class="fa fa-code-fork fa-2x"></i></a></li>
+<li><a href="/about" title="About"><i class="fa fa-question fa-2x"></i></a></li>
+</ul>
+</div>
+
+<div id="topbar">
+<ul>
+<li><a href="/news">News</a></li>
+<li><a href="/documentation">Documentation</a></li>
+<li><a href="/download">Download</a></li>
+<li><a href="/development">Development</a></li>
+<li><a href="/about">About</a></li>
+</ul>
+</div>
+</div>
+<!--TOC-->
+<div id="container">
+<div id="content">
+<h1>{doctitle}</h1>
+
+[footer]
+</div>
+</div>
+<div id="footer">
+<div id="footer-text">
+</div>
+</div>
+
+<a href="https://github.com/ClusterLabs/crmsh"><img style="position: absolute; top: 0; right: 0; border: 0;" src="https://camo.githubusercontent.com/a6677b08c955af8400f44c6298f40e7d19cc5b2d/68747470733a2f2f73332e616d617a6f6e6177732e636f6d2f6769746875622f726962626f6e732f666f726b6d655f72696768745f677261795f3664366436642e706e67" alt="Fork me on GitHub" data-canonical-src="https://s3.amazonaws.com/github/ribbons/forkme_right_gray_6d6d6d.png"></a>
+
+</body>
+</html>
+
+ifdef::doctype-manpage[]
+[synopsis]
+template::[sect1]
+endif::doctype-manpage[]
+
diff --git a/doc/website-v1/crmold.conf b/doc/website-v1/crmold.conf
new file mode 100644
index 0000000..271d88d
--- /dev/null
+++ b/doc/website-v1/crmold.conf
@@ -0,0 +1,602 @@
+#
+# crmold.conf (derived from AsciiDoc's html5.conf)
+#
+# Asciidoc configuration file.
+# html5 backend.
+#
+
+[miscellaneous]
+outfilesuffix=.html
+
+[attributes]
+basebackend=html
+basebackend-html=
+basebackend-html5=
+
+[replacements2]
+# Line break.
+(?m)^(.*)\s\+$=\1<br>
+
+[replacements]
+ifdef::asciidoc7compatible[]
+# Superscripts.
+\^(.+?)\^=<sup>\1</sup>
+# Subscripts.
+~(.+?)~=<sub>\1</sub>
+endif::asciidoc7compatible[]
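+# Illustrative examples (added): with asciidoc7compatible set, ^2^ becomes
+# <sup>2</sup> and ~i~ becomes <sub>i</sub>.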
+
+[ruler-blockmacro]
+<hr>
+
+[pagebreak-blockmacro]
+<div style="page-break-after:always"></div>
+
+[blockdef-pass]
+asciimath-style=template="asciimathblock",subs=()
+latexmath-style=template="latexmathblock",subs=()
+
+[macros]
+(?u)^(?P<name>audio|video)::(?P<target>\S*?)(\[(?P<attrlist>.*?)\])$=#
+# math macros.
+# Special characters are escaped in HTML math markup.
+(?su)[\\]?(?P<name>asciimath|latexmath):(?P<subslist>\S*?)\[(?P<passtext>.*?)(?<!\\)\]=[specialcharacters]
+(?u)^(?P<name>asciimath|latexmath)::(?P<subslist>\S*?)(\[(?P<passtext>.*?)\])$=#[specialcharacters]
+
+[asciimath-inlinemacro]
+`{passtext}`
+
+[asciimath-blockmacro]
+<div class="mathblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="content">
+<div class="title">{title}</div>
+`{passtext}`
+</div></div>
+
+[asciimathblock]
+<div class="mathblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="content">
+<div class="title">{title}</div>
+`|`
+</div></div>
+
+[latexmath-inlinemacro]
+{passtext}
+
+[latexmath-blockmacro]
+<div class="mathblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="content">
+<div class="title">{title}</div>
+{passtext}
+</div></div>
+
+[latexmathblock]
+<div class="mathblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="content">
+<div class="title">{title}</div>
+|
+</div></div>
+
+[image-inlinemacro]
+<span class="image{role? {role}}">
+<a class="image" href="{link}">
+{data-uri%}<img src="{imagesdir=}{imagesdir?/}{target}" alt="{alt={target}}"{width? width="{width}"}{height? height="{height}"}{title? title="{title}"}>
+{data-uri#}<img alt="{alt={target}}"{width? width="{width}"}{height? height="{height}"}{title? title="{title}"}
+{data-uri#}{sys:"{python}" -u -c "import mimetypes,base64,sys; print 'src=\"data:'+mimetypes.guess_type(r'{target}')[0]+';base64,'; base64.encode(sys.stdin,sys.stdout)" < "{eval:os.path.join(r"{indir={outdir}}",r"{imagesdir=}",r"{target}")}"}">
+{link#}</a>
+</span>
+
+[image-blockmacro]
+<div class="imageblock{style? {style}}{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}{align? style="text-align:{align};"}{float? style="float:{float};"}>
+<div class="content">
+<a class="image" href="{link}">
+{data-uri%}<img src="{imagesdir=}{imagesdir?/}{target}" alt="{alt={target}}"{width? width="{width}"}{height? height="{height}"}>
+{data-uri#}<img alt="{alt={target}}"{width? width="{width}"}{height? height="{height}"}
+{data-uri#}{sys:"{python}" -u -c "import mimetypes,base64,sys; print 'src=\"data:'+mimetypes.guess_type(r'{target}')[0]+';base64,'; base64.encode(sys.stdin,sys.stdout)" < "{eval:os.path.join(r"{indir={outdir}}",r"{imagesdir=}",r"{target}")}"}">
+{link#}</a>
+</div>
+<div class="title">{caption={figure-caption} {counter:figure-number}. }{title}</div>
+</div>
+
+[audio-blockmacro]
+<div class="audioblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="title">{caption=}{title}</div>
+<div class="content">
+<audio src="{imagesdir=}{imagesdir?/}{target}"{autoplay-option? autoplay}{nocontrols-option! controls}{loop-option? loop}>
+Your browser does not support the audio tag.
+</audio>
+</div></div>
+
+[video-blockmacro]
+<div class="videoblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="title">{caption=}{title}</div>
+<div class="content">
+<video src="{imagesdir=}{imagesdir?/}{target}"{width? width="{width}"}{height? height="{height}"}{poster? poster="{poster}"}{autoplay-option? autoplay}{nocontrols-option! controls}{loop-option? loop}>
+Your browser does not support the video tag.
+</video>
+</div></div>
+
+[unfloat-blockmacro]
+<div style="clear:both;"></div>
+
+[toc-blockmacro]
+template::[toc]
+
+[indexterm-inlinemacro]
+# Index term.
+{empty}
+
+[indexterm2-inlinemacro]
+# Index term.
+# Single entry index term that is visible in the primary text flow.
+{1}
+
+[footnote-inlinemacro]
+# footnote:[<text>].
+<span class="footnote"><br>[{0}]<br></span>
+
+[footnoteref-inlinemacro]
+# footnoteref:[<id>], create reference to footnote.
+{2%}<span class="footnoteref"><br><a href="#_footnote_{1}">[{1}]</a><br></span>
+# footnoteref:[<id>,<text>], create footnote with ID.
+{2#}<span class="footnote" id="_footnote_{1}"><br>[{2}]<br></span>
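+# Note (added): {2%} selects the first form when footnoteref is called without
+# a second argument; {2#} selects the second form when one is supplied.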
+
+[callout-inlinemacro]
+ifndef::icons[]
+<b>&lt;{index}&gt;</b>
+endif::icons[]
+ifdef::icons[]
+ifndef::data-uri[]
+<img src="{icon={iconsdir}/callouts/{index}.png}" alt="{index}">
+endif::data-uri[]
+ifdef::data-uri[]
+<img alt="{index}" src="data:image/png;base64,
+{sys:"{python}" -u -c "import base64,sys; base64.encode(sys.stdin,sys.stdout)" < "{eval:os.path.join(r"{indir={outdir}}",r"{icon={iconsdir}/callouts/{index}.png}")}"}">
+endif::data-uri[]
+endif::icons[]
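+# Note (added): with data-uri set, the callout image is inlined by running
+# "{python}" to base64-encode {iconsdir}/callouts/{index}.png into a data: URL.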
+
+# Comment line macros.
+[comment-inlinemacro]
+{showcomments#}<br><span class="comment">{passtext}</span><br>
+
+[comment-blockmacro]
+{showcomments#}<p><span class="comment">{passtext}</span></p>
+
+[literal-inlinemacro]
+# Inline literal.
+<span class="monospaced">{passtext}</span>
+
+# List tags.
+[listtags-bulleted]
+list=<div class="ulist{style? {style}}{compact-option? compact}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<ul>|</ul></div>
+item=<li>|</li>
+text=<p>|</p>
+
+[listtags-numbered]
+# The start attribute is not valid XHTML 1.1 but all browsers support it.
+list=<div class="olist{style? {style}}{compact-option? compact}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<ol class="{style}"{start? start="{start}"}>|</ol></div>
+item=<li>|</li>
+text=<p>|</p>
+
+[listtags-labeled]
+list=<div class="dlist{compact-option? compact}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<dl>|</dl></div>
+entry=
+label=
+term=<dt class="hdlist1{strong-option? strong}">|</dt>
+item=<dd>|</dd>
+text=<p>|</p>
+
+[listtags-horizontal]
+list=<div class="hdlist{compact-option? compact}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<table>{labelwidth?<col width="{labelwidth}%">}{itemwidth?<col width="{itemwidth}%">}|</table></div>
+label=<td class="hdlist1{strong-option? strong}">|</td>
+term=|<br>
+entry=<tr>|</tr>
+item=<td class="hdlist2">|</td>
+text=<p style="margin-top: 0;">|</p>
+
+[listtags-qanda]
+list=<div class="qlist{style? {style}}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<ol>|</ol></div>
+entry=<li>|</li>
+label=
+term=<p><em>|</em></p>
+item=
+text=<p>|</p>
+
+[listtags-callout]
+ifndef::icons[]
+list=<div class="colist{style? {style}}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<ol>|</ol></div>
+item=<li>|</li>
+text=<p>|</p>
+endif::icons[]
+ifdef::icons[]
+list=<div class="colist{style? {style}}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<table>|</table></div>
+ifndef::data-uri[]
+item=<tr><td><img src="{iconsdir}/callouts/{listindex}.png" alt="{listindex}"></td><td>|</td></tr>
+endif::data-uri[]
+ifdef::data-uri[]
+item=<tr><td><img alt="{listindex}" src="data:image/png;base64, {sys:"{python}" -u -c "import base64,sys; base64.encode(sys.stdin,sys.stdout)" < "{eval:os.path.join(r"{indir={outdir}}",r"{icon={iconsdir}/callouts/{listindex}.png}")}"}"></td><td>|</td></tr>
+endif::data-uri[]
+text=|
+endif::icons[]
+
+[listtags-glossary]
+list=<div class="dlist{style? {style}}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<dl>|</dl></div>
+label=
+entry=
+term=<dt>|</dt>
+item=<dd>|</dd>
+text=<p>|</p>
+
+[listtags-bibliography]
+list=<div class="ulist{style? {style}}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<ul>|</ul></div>
+item=<li>|</li>
+text=<p>|</p>
+
+[tags]
+# Quoted text.
+emphasis=<em>{1?<span class="{1}">}|{1?</span>}</em>
+strong=<strong>{1?<span class="{1}">}|{1?</span>}</strong>
+monospaced=<span class="monospaced{1? {1}}">|</span>
+singlequoted={lsquo}{1?<span class="{1}">}|{1?</span>}{rsquo}
+doublequoted={ldquo}{1?<span class="{1}">}|{1?</span>}{rdquo}
+unquoted={1?<span class="{1}">}|{1?</span>}
+superscript=<sup>{1?<span class="{1}">}|{1?</span>}</sup>
+subscript=<sub>{1?<span class="{1}">}|{1?</span>}</sub>
+
+ifdef::deprecated-quotes[]
+# Override with deprecated quote attributes.
+emphasis={role?<span class="{role}">}<em{1,2,3? style="}{1?color:{1};}{2?background-color:{2};}{3?font-size:{3}em;}{1,2,3?"}>|</em>{role?</span>}
+strong={role?<span class="{role}">}<strong{1,2,3? style="}{1?color:{1};}{2?background-color:{2};}{3?font-size:{3}em;}{1,2,3?"}>|</strong>{role?</span>}
+monospaced=<span class="monospaced{role? {role}}"{1,2,3? style="}{1?color:{1};}{2?background-color:{2};}{3?font-size:{3}em;}{1,2,3?"}>|</span>
+singlequoted={role?<span class="{role}">}{1,2,3?<span style="}{1?color:{1};}{2?background-color:{2};}{3?font-size:{3}em;}{1,2,3?">}{amp}#8216;|{amp}#8217;{1,2,3?</span>}{role?</span>}
+doublequoted={role?<span class="{role}">}{1,2,3?<span style="}{1?color:{1};}{2?background-color:{2};}{3?font-size:{3}em;}{1,2,3?">}{amp}#8220;|{amp}#8221;{1,2,3?</span>}{role?</span>}
+unquoted={role?<span class="{role}">}{1,2,3?<span style="{1?color:{1};}{2?background-color:{2};}{3?font-size:{3}em;}">}|{1,2,3?</span>}{role?</span>}
+superscript={role?<span class="{role}">}<sup{1,2,3? style="}{1?color:{1};}{2?background-color:{2};}{3?font-size:{3}em;}{1,2,3?"}>|</sup>{role?</span>}
+subscript={role?<span class="{role}">}<sub{1,2,3? style="}{1?color:{1};}{2?background-color:{2};}{3?font-size:{3}em;}{1,2,3?"}>|</sub>{role?</span>}
+endif::deprecated-quotes[]
+
+# Inline macros
+[http-inlinemacro]
+<a href="{name}:{target}">{0={name}:{target}}</a>
+[https-inlinemacro]
+<a href="{name}:{target}">{0={name}:{target}}</a>
+[ftp-inlinemacro]
+<a href="{name}:{target}">{0={name}:{target}}</a>
+[file-inlinemacro]
+<a href="{name}:{target}">{0={name}:{target}}</a>
+[irc-inlinemacro]
+<a href="{name}:{target}">{0={name}:{target}}</a>
+[mailto-inlinemacro]
+<a href="mailto:{target}">{0={target}}</a>
+[link-inlinemacro]
+<a href="{target}">{0={target}}</a>
+[callto-inlinemacro]
+<a href="{name}:{target}">{0={target}}</a>
+# anchor:id[text]
+[anchor-inlinemacro]
+<a id="{target}"></a>
+# [[id,text]]
+[anchor2-inlinemacro]
+<a id="{1}"></a>
+# [[[id]]]
+[anchor3-inlinemacro]
+<a id="{1}"></a>[{1}]
+# xref:id[text]
+[xref-inlinemacro]
+<a href="#{target}">{0=[{target}]}</a>
+# <<id,text>>
+[xref2-inlinemacro]
+<a href="#{1}">{2=[{1}]}</a>
+
+# Special word substitution.
+[emphasizedwords]
+<em>{words}</em>
+[monospacedwords]
+<span class="monospaced">{words}</span>
+[strongwords]
+<strong>{words}</strong>
+
+# Paragraph substitution.
+[paragraph]
+<div class="paragraph{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<p>
+|
+</p></div>
+
+[admonitionparagraph]
+template::[admonitionblock]
+
+# Delimited blocks.
+[listingblock]
+<div class="listingblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="title">{caption=}{title}</div>
+<div class="content monospaced">
+<pre>
+|
+</pre>
+</div></div>
+
+[literalblock]
+<div class="literalblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="title">{title}</div>
+<div class="content monospaced">
+<pre>
+|
+</pre>
+</div></div>
+
+[sidebarblock]
+<div class="sidebarblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="content">
+<div class="title">{title}</div>
+|
+</div></div>
+
+[openblock]
+<div class="openblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="title">{title}</div>
+<div class="content">
+|
+</div></div>
+
+[partintroblock]
+template::[openblock]
+
+[abstractblock]
+template::[quoteblock]
+
+[quoteblock]
+<div class="quoteblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="title">{title}</div>
+<div class="content">
+|
+</div>
+<div class="attribution">
+<em>{citetitle}</em>{attribution?<br>}
+&#8212; {attribution}
+</div></div>
+
+[verseblock]
+<div class="verseblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="title">{title}</div>
+<pre class="content">
+|
+</pre>
+<div class="attribution">
+<em>{citetitle}</em>{attribution?<br>}
+&#8212; {attribution}
+</div></div>
+
+[exampleblock]
+<div class="exampleblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<div class="title">{caption={example-caption} {counter:example-number}. }{title}</div>
+<div class="content">
+|
+</div></div>
+
+[admonitionblock]
+<div class="admonitionblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}>
+<table><tr>
+<td class="icon">
+{data-uri%}{icons#}<img src="{icon={iconsdir}/{name}.png}" alt="{caption}">
+{data-uri#}{icons#}<img alt="{caption}" src="data:image/png;base64,
+{data-uri#}{icons#}{sys:"{python}" -u -c "import base64,sys; base64.encode(sys.stdin,sys.stdout)" < "{eval:os.path.join(r"{indir={outdir}}",r"{icon={iconsdir}/{name}.png}")}"}">
+{icons%}<div class="title">{caption}</div>
+</td>
+<td class="content">
+<div class="title">{title}</div>
+|
+</td>
+</tr></table>
+</div>
+
+# Tables.
+[tabletags-default]
+colspec=<col{autowidth-option! style="width:{colpcwidth}%;"}>
+bodyrow=<tr>|</tr>
+headdata=<th class="tableblock halign-{halign=left} valign-{valign=top}" {colspan@1::colspan="{colspan}" }{rowspan@1::rowspan="{rowspan}" }>|</th>
+bodydata=<td class="tableblock halign-{halign=left} valign-{valign=top}" {colspan@1::colspan="{colspan}" }{rowspan@1::rowspan="{rowspan}" }>|</td>
+paragraph=<p class="tableblock">|</p>
+
+[tabletags-header]
+paragraph=<p class="tableblock header">|</p>
+
+[tabletags-emphasis]
+paragraph=<p class="tableblock"><em>|</em></p>
+
+[tabletags-strong]
+paragraph=<p class="tableblock"><strong>|</strong></p>
+
+[tabletags-monospaced]
+paragraph=<p class="tableblock monospaced">|</p>
+
+[tabletags-verse]
+bodydata=<td class="tableblock halign-{halign=left} valign-{valign=top}" {colspan@1::colspan="{colspan}" }{rowspan@1::rowspan="{rowspan}" }><div class="verse">|</div></td>
+paragraph=
+
+[tabletags-literal]
+bodydata=<td class="tableblock halign-{halign=left} valign-{valign=top}" {colspan@1::colspan="{colspan}" }{rowspan@1::rowspan="{rowspan}" }><div class="literal monospaced"><pre>|</pre></div></td>
+paragraph=
+
+[tabletags-asciidoc]
+bodydata=<td class="tableblock halign-{halign=left} valign-{valign=top}" {colspan@1::colspan="{colspan}" }{rowspan@1::rowspan="{rowspan}" }><div>|</div></td>
+paragraph=
+
+[table]
+<table class="tableblock frame-{frame=all} grid-{grid=all}{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}
+style="
+margin-left:{align@left:0}{align@center|right:auto}; margin-right:{align@left|center:auto}{align@right:0};
+float:{float};
+{autowidth-option%}width:{tablepcwidth}%;
+{autowidth-option#}{width#style=width:{tablepcwidth}%;}
+">
+<caption class="title">{caption={table-caption} {counter:table-number}. }{title}</caption>
+{colspecs}
+{headrows#}<thead>
+{headrows}
+{headrows#}</thead>
+{footrows#}<tfoot>
+{footrows}
+{footrows#}</tfoot>
+<tbody>
+{bodyrows}
+</tbody>
+</table>
+
+#--------------------------------------------------------------------
+# Deprecated old table definitions.
+#
+
+[miscellaneous]
+# Screen width in pixels.
+pagewidth=800
+pageunits=px
+
+[old_tabledef-default]
+template=old_table
+colspec=<col style="width:{colwidth}{pageunits};" />
+bodyrow=<tr>|</tr>
+headdata=<th class="tableblock halign-{colalign=left}">|</th>
+footdata=<td class="tableblock halign-{colalign=left}">|</td>
+bodydata=<td class="tableblock halign-{colalign=left}">|</td>
+
+[old_table]
+<table class="tableblock frame-{frame=all} grid-{grid=all}"{id? id="{id}"}>
+<caption class="title">{caption={table-caption}}{title}</caption>
+{colspecs}
+{headrows#}<thead>
+{headrows}
+{headrows#}</thead>
+{footrows#}<tfoot>
+{footrows}
+{footrows#}</tfoot>
+<tbody style="vertical-align:top;">
+{bodyrows}
+</tbody>
+</table>
+
+# End of deprecated old table definitions.
+#--------------------------------------------------------------------
+
+[floatingtitle]
+<h{level@0:1}{level@1:2}{level@2:3}{level@3:4}{level@4:5}{id? id="{id}"} class="float">{title}</h{level@0:1}{level@1:2}{level@2:3}{level@3:4}{level@4:5}>
+
+[preamble]
+# Untitled elements between header and first section title.
+<div id="preamble">
+<div class="sectionbody">
+|
+</div>
+</div>
+
+# Document sections.
+[sect0]
+<h1{id? id="{id}"}>{title}</h1>
+|
+
+[sect1]
+<div class="sect1{style? {style}}{role? {role}}">
+<h2{id? id="{id}"}>{numbered?{sectnum} }{title}</h2>
+<div class="sectionbody">
+|
+</div>
+</div>
+
+[sect2]
+<div class="sect2{style? {style}}{role? {role}}">
+<h3{id? id="{id}"}>{numbered?{sectnum} }{title}</h3>
+|
+</div>
+
+[sect3]
+<div class="sect3{style? {style}}{role? {role}}">
+<h4{id? id="{id}"}>{numbered?{sectnum} }{title}</h4>
+|
+</div>
+
+[sect4]
+<div class="sect4{style? {style}}{role? {role}}">
+<h5{id? id="{id}"}>{title}</h5>
+|
+</div>
+
+[appendix]
+<div class="sect1{style? {style}}{role? {role}}">
+<h2{id? id="{id}"}>{numbered?{sectnum} }{appendix-caption} {counter:appendix-number:A}: {title}</h2>
+<div class="sectionbody">
+|
+</div>
+</div>
+
+[toc]
+<div id="toc">
+ <div id="toctitle">{toc-title}</div>
+ <noscript><p><b>JavaScript must be enabled in your browser to display the table of contents.</b></p></noscript>
+</div>
+
+[header]
+<!DOCTYPE html>
+<html lang="{lang=en}">
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset={encoding}">
+<meta name="generator" content="AsciiDoc {asciidoc-version}">
+<meta name="description" content="{description}">
+<meta name="keywords" content="{keywords}">
+<title>crmsh - {title}</title>
+{title%}<title>crmsh - {doctitle=}</title>
+<link rel="stylesheet" href="http://crmsh.nongnu.org/css/font-awesome.min.css">
+<link rel="stylesheet" href="http://crmsh.nongnu.org/css/crm.css" type="text/css">
+<link href='http://fonts.googleapis.com/css?family=Open+Sans:400,700|Ubuntu+Mono' rel='stylesheet' type='text/css'>
+<link href="http://crmsh.github.io/atom.xml" type="application/atom+xml" rel="alternate" title="crmsh atom feed">
+<style>
+\#movenotice {
+ width: 600px;
+ margin-top: 1em;
+ margin-bottom: 1em;
+ margin-left: auto;
+ margin-right: auto;
+ font-size: 100%;
+ padding: 4px;
+ border: 2px dashed red;
+}
+</style>
+</head>
+<body>
+<div id="header">
+<h1><a href="http://crmsh.github.io/index.html"><span class="fa-stack">
+ <i class="fa fa-square fa-stack-2x"></i>
+ <i class="fa fa-terminal fa-stack-1x fa-inverse"></i>
+</span>crmsh</a></h1>
+<div id="topbar">
+<ul>
+<li><a href="http://crmsh.github.io/news">News</a></li>
+<li><a href="http://crmsh.github.io/documentation">Documentation</a></li>
+<li><a href="http://crmsh.github.io/development">Development</a></li>
+<li><a href="http://crmsh.github.io/about">About</a></li>
+</ul>
+</div>
+</div>
+<!--TOC-->
+<div id="container">
+<div id="content">
+
+<div id="movenotice">We have moved! The website for crmsh is now <a href="http://crmsh.github.io">http://crmsh.github.io</a>.</div>
+
+<h1>{doctitle}</h1>
+
+[footer]
+</div>
+</div>
+<div id="footer">
+<div id="footer-text">
+</div>
+</div>
+
+<a href="https://github.com/crmsh/crmsh"><img style="position: absolute; top: 0; right: 0; border: 0;" src="https://camo.githubusercontent.com/652c5b9acfaddf3a9c326fa6bde407b87f7be0f4/68747470733a2f2f73332e616d617a6f6e6177732e636f6d2f6769746875622f726962626f6e732f666f726b6d655f72696768745f6f72616e67655f6666373630302e706e67" alt="Fork me on GitHub" data-canonical-src="https://s3.amazonaws.com/github/ribbons/forkme_right_orange_ff7600.png"></a>
+
+</body>
+</html>
+
+ifdef::doctype-manpage[]
+[synopsis]
+template::[sect1]
+endif::doctype-manpage[]
+
diff --git a/doc/website-v1/css/crm.css b/doc/website-v1/css/crm.css
new file mode 100644
index 0000000..9fdfba2
--- /dev/null
+++ b/doc/website-v1/css/crm.css
@@ -0,0 +1,570 @@
+/* ---------------------------------------------------------------------------
+ Based on
+ Bare AsciiDoc styles
+ Ryan Tomayko <r@tomayko.com>
+
+ Heavily modified by
+ Kristoffer Gronlund <kgronlund@suse.com>
+ --------------------------------------------------------------------------- */
+
+/* Fonts */
+
+body {
+ font-family:'Open Sans', 'lucida grande',verdana,helvetica,arial,sans-serif;
+ font-size: 16px;
+ line-height: 21px;
+}
+
+.small {
+ font-size: 12px; /* 75% of the baseline */
+}
+
+.large {
+ font-size: 20px; /* 125% of the baseline */
+}
+
+em {
+ font-style:italic;
+}
+
+strong {
+ font-weight:bold;
+}
+
+.monospaced {
+ font-family: 'Menlo', 'Monaco', 'Consolas', 'lucida console', 'bitstream vera sans mono', 'courier new', monospace;
+}
+
+dt {
+ font-weight:normal;
+}
+
+h1, h2, h3, h4, h5 {
+ font-family:'Open Sans', 'lucida grande',verdana,helvetica,arial,sans-serif;
+ font-weight:normal;
+}
+
+h1 {
+ font-size:2.6rem;
+ line-height:1.428;
+}
+
+h2 {
+ font-size:2rem;
+ line-height:1.36363636; /* repeating, of course */
+}
+
+h3 {
+ font-size:1.6rem;
+ line-height:1.1;
+}
+
+h4 {
+ font-weight: bold;
+ font-size:1.3rem;
+ line-height:1.538;
+}
+
+h5 {
+ font-size:1.2rem;
+ font-style:italic;
+ line-height:1.538;
+}
+
+pre {
+ font-family:'Menlo', 'Monaco', consolas, 'lucida console', 'bitstream vera sans mono', 'courier new', monospace;
+ font-size: 14px;
+}
+
+#header h1 {
+ font-size: 42px;
+ margin: 0px;
+ display: inline;
+}
+
+#topbar {
+ display: inline;
+ font-size: 18px;
+}
+
+#topbar-small {
+ display: none;
+ font-size: 14px;
+}
+
+
+
+/* Style */
+
+body {
+ margin: 0px 0px;
+ padding: 0px;
+ width: 100%;
+ color:#333;
+ background: #fff;
+}
+
+.monospaced {
+ color: #211;
+ background-color: #fafaf8;
+}
+
+p {
+ margin-bottom: 1.3636rem;
+}
+
+ul, ol, dl {
+ margin-top: 1rem;
+ margin-bottom: 2rem;
+}
+
+ul p {
+ margin: 10px 0;
+}
+
+dl {
+ margin-left:40px
+}
+
+dt {
+ color:#000;
+}
+
+h1, h2, h3, h4, h5 {
+ color:#000;
+}
+
+h2, h3, h4, h5 {
+ padding-bottom: 0.333rem;
+ border-bottom: 1px solid #eee;
+}
+
+h1 {
+ margin:0px;
+ margin-top: 48px;
+}
+
+h2 {
+ margin-top: 36px;
+ margin-bottom: 1.5rem;
+}
+
+h3 {
+ margin: 0px;
+ margin-top: 30px;
+}
+
+pre {
+ color: #211;
+ overflow-x: auto;
+}
+
+#header {
+ background: #ecf0f1;
+ padding-left: 24px;
+ padding-top: 4px;
+ padding-bottom: 0px;
+ border-bottom: 2px solid #efefea;
+ width: 100%;
+ height: 86px;
+ overflow: hidden;
+}
+
+#header a {
+ text-decoration: none;
+ color: #34495e;
+}
+
+#header a:hover {
+ color:#ee3300;
+}
+
+#topbar ul {
+ list-style: none;
+ display: inline;
+}
+
+#topbar li {
+ list-style: none;
+ display: inline;
+ padding-right: 1rem;
+}
+
+#topbar-small ul {
+ list-style: none;
+ display: inline;
+ padding-left: 0.5rem;
+}
+
+#topbar-small li {
+ list-style: none;
+ display: inline;
+ padding-right: 0.5rem;
+}
+
+
+#container {
+ max-width: 720px;
+ margin-left: 240px;
+ padding-left: 8px;
+ text-align:left;
+}
+
+#author {
+ color:#999;
+}
+
+a {
+ text-decoration: none;
+ color:#419eda;
+}
+
+a:active {
+ color:#6ec654;
+}
+
+a:hover {
+ color:#ee3300;
+ text-decoration: underline;
+}
+
+
+#content {
+}
+
+h1 {
+ margin-left: auto;
+ margin-right: auto;
+ width: 551px;
+ text-align: center;
+ margin-bottom: 1.5rem;
+}
+
+.frontpage-image {
+ margin-left: auto;
+ margin-right: auto;
+ width: 551px;
+}
+
+.title, .sidebar-title {
+ font-weight:normal;
+ color:#000;
+ margin-bottom:0;
+}
+
+div.content {
+ margin: 8px;
+ padding: 0;
+}
+
+div.admonitionblock .title {
+ font-weight:bold;
+}
+
+div.admonitionblock {
+ margin:30px 0px;
+ color:#555;
+}
+
+div.admonitionblock td.icon {
+ width:30px;
+ padding-right:20px;
+ padding-left:20px;
+ text-transform:uppercase;
+ font-weight:bold;
+ color:#888;
+}
+
+div.listingblock .content {
+ border-left:4px solid #419eda;
+ padding:8px;
+
+ background: #faf7f8;
+
+  background-image: -moz-linear-gradient(left, #faf7f8, #ffffff);
+
+ background-image: -webkit-gradient(linear, left top, right bottom,
+ color-stop(0.00, #faf7f8),
+ color-stop(1.00, #ffffff));
+
+}
+
+div.listingblock .content pre {
+ margin:0;
+}
+
+div.literalblock .content {
+ margin-left: 20px;
+}
+
+div.verseblock .content {
+ white-space:pre
+}
+
+div.sidebarblock {
+ margin-top: 1.5rem;
+ margin-bottom: 2rem;
+}
+
+div.sidebarblock > div.content {
+ border-left:4px solid #ee3300;
+ background: #faf7f8;
+ padding:0 10px;
+ color:#222;
+ font-size: 14px;
+ line-height:18px;
+ max-width: 720px;
+
+  background-image: -moz-linear-gradient(left, #faf7f8, #ffffff);
+
+ background-image: -webkit-gradient(linear, left top, right bottom,
+ color-stop(0.00, #faf7f8),
+ color-stop(1.00, #ffffff));
+
+}
+
+div.sidebarblock .title {
+ margin:10px 0;
+ font-weight:bold;
+ font-size: 14px;
+ color:#442;
+}
+
+.quoteblock-content {
+ font-style:italic;
+ color:#444;
+ margin-left:40px;
+}
+
+.quoteblock-content .attribution {
+ font-style:normal;
+ text-align:right;
+ color:#000;
+}
+
+.exampleblock-content *:first-child { margin-top:0 }
+.exampleblock-content {
+ border-left:2px solid silver;
+ padding-left:8px;
+}
+
+#footnotes {
+ text-align:left;
+}
+
+#footnotes hr {
+ height: 1px;
+ color: #ccc;
+ width: 80%;
+}
+
+#footer {
+ font-size: 12px;
+ color:#888;
+ margin-top:40px;
+ text-align: right;
+}
+
+.nav {
+ margin-bottom: 0;
+ padding-left: 0;
+ list-style: none;
+}
+
+.nav li {
+ line-height: 4rem;
+}
+
+.nav a {
+ font-size: 20px;
+ text-decoration: none;
+}
+
+.feedEkList .newsItem {
+ list-style-type: none;
+}
+
+.feedEkList .itemTitle {
+ font-size: large;
+}
+
+.feedEkList .itemDate {
+ font-size: smaller;
+}
+
+.feedEkList .itemContent {
+}
+
+@media screen {
+ #toc {
+ position: fixed;
+ top: 120px;
+ left: 4px;
+ margin: 0px;
+ font-size: 12px;
+ line-height: 1.2em;
+ }
+
+ #toc a .monospaced {
+ color:#419eda;
+ }
+
+ #toc a {
+ text-decoration: none;
+ }
+
+ #toc .toclevel1 {
+ padding: 1px;
+ margin-top: 8px;
+ font-size: 14px;
+ line-height: 16px;
+ }
+
+ #toc .toclevel2 {
+ margin-left: 8px;
+ padding-left: 4px;
+ font-size: 12px;
+ line-height: 16px;
+ }
+
+ #toc .toclevel3 {
+ margin-left: 24px;
+ padding-left: 4px;
+ font-size: 11px;
+ line-height: 15px;
+ font-weight: bold;
+ }
+
+ #toctitle {
+ margin:20px 0;
+
+ }
+}
+
+@media screen and (max-width: 900px) {
+ #toc {
+ display: none;
+ }
+
+ #container {
+ max-width: 720px;
+ margin: 0px auto;
+ text-align:left;
+ padding-right: 8px;
+ }
+
+ #topbar {
+ display: none;
+ }
+
+ #topbar-small {
+ display: inline;
+ }
+}
+
+@media screen and (min-width: 900px) and (max-width: 1280px) {
+ #toc {
+ position: absolute;
+ overflow: hidden;
+ top: 120px;
+ left: 4px;
+ max-width: 240px;
+ }
+}
+
+@media screen and (min-width: 1280px) {
+ #toc {
+ position: absolute;
+ overflow: hidden;
+ top: 120px;
+ left: 4px;
+ max-width: 240px;
+ }
+
+ #container {
+ max-width: 960px;
+ }
+}
+
+@media screen and (min-width: 1500px) {
+ #container {
+ margin-left: auto;
+ margin-right: auto;
+ }
+}
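+
+/* Summary of the breakpoints above (added): below 900px the sidebar TOC and
+   the full top bar are hidden in favour of #topbar-small; from 900px the TOC
+   is absolutely positioned in the left margin; above 1280px the content
+   column widens to 960px. */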
+
+/* pygments highlighting */
+
+.hll { background-color: #ffffcc }
+.c { color: #999988; font-style: italic } /* Comment */
+.err { color: #a61717; background-color: #e3d2d2 } /* Error */
+.k { color: #000000; font-weight: bold } /* Keyword */
+.o { color: #000000; font-weight: bold } /* Operator */
+.cm { color: #999988; font-style: italic } /* Comment.Multiline */
+.cp { color: #999999; font-weight: bold; font-style: italic } /* Comment.Preproc */
+.c1 { color: #999988; font-style: italic } /* Comment.Single */
+.cs { color: #999999; font-weight: bold; font-style: italic } /* Comment.Special */
+.gd { color: #000000; background-color: #ffdddd } /* Generic.Deleted */
+.ge { color: #000000; font-style: italic } /* Generic.Emph */
+.gr { color: #aa0000 } /* Generic.Error */
+.gh { color: #999999 } /* Generic.Heading */
+.gi { color: #000000; background-color: #ddffdd } /* Generic.Inserted */
+.go { color: #888888 } /* Generic.Output */
+.gp { color: #555555 } /* Generic.Prompt */
+.gs { font-weight: bold } /* Generic.Strong */
+.gu { color: #aaaaaa } /* Generic.Subheading */
+.gt { color: #aa0000 } /* Generic.Traceback */
+.kc { color: #000000; font-weight: bold } /* Keyword.Constant */
+.kd { color: #000000; font-weight: bold } /* Keyword.Declaration */
+.kn { color: #000000; font-weight: bold } /* Keyword.Namespace */
+.kp { color: #000000; font-weight: bold } /* Keyword.Pseudo */
+.kr { color: #000000; font-weight: bold } /* Keyword.Reserved */
+.kt { color: #445588; font-weight: bold } /* Keyword.Type */
+.m { color: #009999 } /* Literal.Number */
+.s { color: #d01040 } /* Literal.String */
+.na { color: #008080 } /* Name.Attribute */
+.nb { color: #0086B3 } /* Name.Builtin */
+.nc { color: #445588; font-weight: bold } /* Name.Class */
+.no { color: #008080 } /* Name.Constant */
+.nd { color: #3c5d5d; font-weight: bold } /* Name.Decorator */
+.ni { color: #800080 } /* Name.Entity */
+.ne { color: #990000; font-weight: bold } /* Name.Exception */
+.nf { color: #990000; font-weight: bold } /* Name.Function */
+.nl { color: #990000; font-weight: bold } /* Name.Label */
+.nn { color: #555555 } /* Name.Namespace */
+.nt { color: #000080 } /* Name.Tag */
+.nv { color: #008080 } /* Name.Variable */
+.ow { color: #000000; font-weight: bold } /* Operator.Word */
+.w { color: #bbbbbb } /* Text.Whitespace */
+.mf { color: #009999 } /* Literal.Number.Float */
+.mh { color: #009999 } /* Literal.Number.Hex */
+.mi { color: #009999 } /* Literal.Number.Integer */
+.mo { color: #009999 } /* Literal.Number.Oct */
+.sb { color: #d01040 } /* Literal.String.Backtick */
+.sc { color: #d01040 } /* Literal.String.Char */
+.sd { color: #d01040 } /* Literal.String.Doc */
+.s2 { color: #d01040 } /* Literal.String.Double */
+.se { color: #d01040 } /* Literal.String.Escape */
+.sh { color: #d01040 } /* Literal.String.Heredoc */
+.si { color: #d01040 } /* Literal.String.Interpol */
+.sx { color: #d01040 } /* Literal.String.Other */
+.sr { color: #009926 } /* Literal.String.Regex */
+.s1 { color: #d01040 } /* Literal.String.Single */
+.ss { color: #990073 } /* Literal.String.Symbol */
+.bp { color: #999999 } /* Name.Builtin.Pseudo */
+.vc { color: #008080 } /* Name.Variable.Class */
+.vg { color: #008080 } /* Name.Variable.Global */
+.vi { color: #008080 } /* Name.Variable.Instance */
+.il { color: #009999 } /* Literal.Number.Integer.Long */
+.highlight .-Color-Black { color: #000000 } /* Color.Black */
+.highlight .-Color-Blue { color: #0000c0 } /* Color.Blue */
+.highlight .-Color-Cyan { color: #008080 } /* Color.Cyan */
+.highlight .-Color-Green { color: #008000 } /* Color.Green */
+.highlight .-Color-Magenta { color: #c000c0 } /* Color.Magenta */
+.highlight .-Color-Red { color: #c00000 } /* Color.Red */
+.highlight .-Color-White { color: #c0c0c0 } /* Color.White */
+.highlight .-Color-Yellow { color: #808000 } /* Color.Yellow */
diff --git a/doc/website-v1/css/font-awesome.css b/doc/website-v1/css/font-awesome.css
new file mode 100644
index 0000000..048cff9
--- /dev/null
+++ b/doc/website-v1/css/font-awesome.css
@@ -0,0 +1,1338 @@
+/*!
+ * Font Awesome 4.0.3 by @davegandy - http://fontawesome.io - @fontawesome
+ * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
+ */
+/* FONT PATH
+ * -------------------------- */
+@font-face {
+ font-family: 'FontAwesome';
+ src: url('../fonts/fontawesome-webfont.eot?v=4.0.3');
+ src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.0.3') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff?v=4.0.3') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.0.3') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.0.3#fontawesomeregular') format('svg');
+ font-weight: normal;
+ font-style: normal;
+}
+.fa {
+ display: inline-block;
+ font-family: FontAwesome;
+ font-style: normal;
+ font-weight: normal;
+ line-height: 1;
+ -webkit-font-smoothing: antialiased;
+ -moz-osx-font-smoothing: grayscale;
+}
+/* makes the font 33% larger relative to the icon container */
+.fa-lg {
+ font-size: 1.3333333333333333em;
+ line-height: 0.75em;
+ vertical-align: -15%;
+}
+.fa-2x {
+ font-size: 2em;
+}
+.fa-3x {
+ font-size: 3em;
+}
+.fa-4x {
+ font-size: 4em;
+}
+.fa-5x {
+ font-size: 5em;
+}
+.fa-fw {
+ width: 1.2857142857142858em;
+ text-align: center;
+}
+.fa-ul {
+ padding-left: 0;
+ margin-left: 2.142857142857143em;
+ list-style-type: none;
+}
+.fa-ul > li {
+ position: relative;
+}
+.fa-li {
+ position: absolute;
+ left: -2.142857142857143em;
+ width: 2.142857142857143em;
+ top: 0.14285714285714285em;
+ text-align: center;
+}
+.fa-li.fa-lg {
+ left: -1.8571428571428572em;
+}
+.fa-border {
+ padding: .2em .25em .15em;
+ border: solid 0.08em #eeeeee;
+ border-radius: .1em;
+}
+.pull-right {
+ float: right;
+}
+.pull-left {
+ float: left;
+}
+.fa.pull-left {
+ margin-right: .3em;
+}
+.fa.pull-right {
+ margin-left: .3em;
+}
+.fa-spin {
+ -webkit-animation: spin 2s infinite linear;
+ -moz-animation: spin 2s infinite linear;
+ -o-animation: spin 2s infinite linear;
+ animation: spin 2s infinite linear;
+}
+@-moz-keyframes spin {
+ 0% {
+ -moz-transform: rotate(0deg);
+ }
+ 100% {
+ -moz-transform: rotate(359deg);
+ }
+}
+@-webkit-keyframes spin {
+ 0% {
+ -webkit-transform: rotate(0deg);
+ }
+ 100% {
+ -webkit-transform: rotate(359deg);
+ }
+}
+@-o-keyframes spin {
+ 0% {
+ -o-transform: rotate(0deg);
+ }
+ 100% {
+ -o-transform: rotate(359deg);
+ }
+}
+@-ms-keyframes spin {
+ 0% {
+ -ms-transform: rotate(0deg);
+ }
+ 100% {
+ -ms-transform: rotate(359deg);
+ }
+}
+@keyframes spin {
+ 0% {
+ transform: rotate(0deg);
+ }
+ 100% {
+ transform: rotate(359deg);
+ }
+}
+.fa-rotate-90 {
+ filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1);
+ -webkit-transform: rotate(90deg);
+ -moz-transform: rotate(90deg);
+ -ms-transform: rotate(90deg);
+ -o-transform: rotate(90deg);
+ transform: rotate(90deg);
+}
+.fa-rotate-180 {
+ filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2);
+ -webkit-transform: rotate(180deg);
+ -moz-transform: rotate(180deg);
+ -ms-transform: rotate(180deg);
+ -o-transform: rotate(180deg);
+ transform: rotate(180deg);
+}
+.fa-rotate-270 {
+ filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3);
+ -webkit-transform: rotate(270deg);
+ -moz-transform: rotate(270deg);
+ -ms-transform: rotate(270deg);
+ -o-transform: rotate(270deg);
+ transform: rotate(270deg);
+}
+.fa-flip-horizontal {
+ filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);
+ -webkit-transform: scale(-1, 1);
+ -moz-transform: scale(-1, 1);
+ -ms-transform: scale(-1, 1);
+ -o-transform: scale(-1, 1);
+ transform: scale(-1, 1);
+}
+.fa-flip-vertical {
+ filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);
+ -webkit-transform: scale(1, -1);
+ -moz-transform: scale(1, -1);
+ -ms-transform: scale(1, -1);
+ -o-transform: scale(1, -1);
+ transform: scale(1, -1);
+}
+.fa-stack {
+ position: relative;
+ display: inline-block;
+ width: 2em;
+ height: 2em;
+ line-height: 2em;
+ vertical-align: middle;
+}
+.fa-stack-1x,
+.fa-stack-2x {
+ position: absolute;
+ left: 0;
+ width: 100%;
+ text-align: center;
+}
+.fa-stack-1x {
+ line-height: inherit;
+}
+.fa-stack-2x {
+ font-size: 2em;
+}
+.fa-inverse {
+ color: #ffffff;
+}
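+/* Example usage (added), matching the crmsh site header template:
+   <span class="fa-stack">
+     <i class="fa fa-square fa-stack-2x"></i>
+     <i class="fa fa-terminal fa-stack-1x fa-inverse"></i>
+   </span> */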
+/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen
+ readers do not read off random characters that represent icons */
+.fa-glass:before {
+ content: "\f000";
+}
+.fa-music:before {
+ content: "\f001";
+}
+.fa-search:before {
+ content: "\f002";
+}
+.fa-envelope-o:before {
+ content: "\f003";
+}
+.fa-heart:before {
+ content: "\f004";
+}
+.fa-star:before {
+ content: "\f005";
+}
+.fa-star-o:before {
+ content: "\f006";
+}
+.fa-user:before {
+ content: "\f007";
+}
+.fa-film:before {
+ content: "\f008";
+}
+.fa-th-large:before {
+ content: "\f009";
+}
+.fa-th:before {
+ content: "\f00a";
+}
+.fa-th-list:before {
+ content: "\f00b";
+}
+.fa-check:before {
+ content: "\f00c";
+}
+.fa-times:before {
+ content: "\f00d";
+}
+.fa-search-plus:before {
+ content: "\f00e";
+}
+.fa-search-minus:before {
+ content: "\f010";
+}
+.fa-power-off:before {
+ content: "\f011";
+}
+.fa-signal:before {
+ content: "\f012";
+}
+.fa-gear:before,
+.fa-cog:before {
+ content: "\f013";
+}
+.fa-trash-o:before {
+ content: "\f014";
+}
+.fa-home:before {
+ content: "\f015";
+}
+.fa-file-o:before {
+ content: "\f016";
+}
+.fa-clock-o:before {
+ content: "\f017";
+}
+.fa-road:before {
+ content: "\f018";
+}
+.fa-download:before {
+ content: "\f019";
+}
+.fa-arrow-circle-o-down:before {
+ content: "\f01a";
+}
+.fa-arrow-circle-o-up:before {
+ content: "\f01b";
+}
+.fa-inbox:before {
+ content: "\f01c";
+}
+.fa-play-circle-o:before {
+ content: "\f01d";
+}
+.fa-rotate-right:before,
+.fa-repeat:before {
+ content: "\f01e";
+}
+.fa-refresh:before {
+ content: "\f021";
+}
+.fa-list-alt:before {
+ content: "\f022";
+}
+.fa-lock:before {
+ content: "\f023";
+}
+.fa-flag:before {
+ content: "\f024";
+}
+.fa-headphones:before {
+ content: "\f025";
+}
+.fa-volume-off:before {
+ content: "\f026";
+}
+.fa-volume-down:before {
+ content: "\f027";
+}
+.fa-volume-up:before {
+ content: "\f028";
+}
+.fa-qrcode:before {
+ content: "\f029";
+}
+.fa-barcode:before {
+ content: "\f02a";
+}
+.fa-tag:before {
+ content: "\f02b";
+}
+.fa-tags:before {
+ content: "\f02c";
+}
+.fa-book:before {
+ content: "\f02d";
+}
+.fa-bookmark:before {
+ content: "\f02e";
+}
+.fa-print:before {
+ content: "\f02f";
+}
+.fa-camera:before {
+ content: "\f030";
+}
+.fa-font:before {
+ content: "\f031";
+}
+.fa-bold:before {
+ content: "\f032";
+}
+.fa-italic:before {
+ content: "\f033";
+}
+.fa-text-height:before {
+ content: "\f034";
+}
+.fa-text-width:before {
+ content: "\f035";
+}
+.fa-align-left:before {
+ content: "\f036";
+}
+.fa-align-center:before {
+ content: "\f037";
+}
+.fa-align-right:before {
+ content: "\f038";
+}
+.fa-align-justify:before {
+ content: "\f039";
+}
+.fa-list:before {
+ content: "\f03a";
+}
+.fa-dedent:before,
+.fa-outdent:before {
+ content: "\f03b";
+}
+.fa-indent:before {
+ content: "\f03c";
+}
+.fa-video-camera:before {
+ content: "\f03d";
+}
+.fa-picture-o:before {
+ content: "\f03e";
+}
+.fa-pencil:before {
+ content: "\f040";
+}
+.fa-map-marker:before {
+ content: "\f041";
+}
+.fa-adjust:before {
+ content: "\f042";
+}
+.fa-tint:before {
+ content: "\f043";
+}
+.fa-edit:before,
+.fa-pencil-square-o:before {
+ content: "\f044";
+}
+.fa-share-square-o:before {
+ content: "\f045";
+}
+.fa-check-square-o:before {
+ content: "\f046";
+}
+.fa-arrows:before {
+ content: "\f047";
+}
+.fa-step-backward:before {
+ content: "\f048";
+}
+.fa-fast-backward:before {
+ content: "\f049";
+}
+.fa-backward:before {
+ content: "\f04a";
+}
+.fa-play:before {
+ content: "\f04b";
+}
+.fa-pause:before {
+ content: "\f04c";
+}
+.fa-stop:before {
+ content: "\f04d";
+}
+.fa-forward:before {
+ content: "\f04e";
+}
+.fa-fast-forward:before {
+ content: "\f050";
+}
+.fa-step-forward:before {
+ content: "\f051";
+}
+.fa-eject:before {
+ content: "\f052";
+}
+.fa-chevron-left:before {
+ content: "\f053";
+}
+.fa-chevron-right:before {
+ content: "\f054";
+}
+.fa-plus-circle:before {
+ content: "\f055";
+}
+.fa-minus-circle:before {
+ content: "\f056";
+}
+.fa-times-circle:before {
+ content: "\f057";
+}
+.fa-check-circle:before {
+ content: "\f058";
+}
+.fa-question-circle:before {
+ content: "\f059";
+}
+.fa-info-circle:before {
+ content: "\f05a";
+}
+.fa-crosshairs:before {
+ content: "\f05b";
+}
+.fa-times-circle-o:before {
+ content: "\f05c";
+}
+.fa-check-circle-o:before {
+ content: "\f05d";
+}
+.fa-ban:before {
+ content: "\f05e";
+}
+.fa-arrow-left:before {
+ content: "\f060";
+}
+.fa-arrow-right:before {
+ content: "\f061";
+}
+.fa-arrow-up:before {
+ content: "\f062";
+}
+.fa-arrow-down:before {
+ content: "\f063";
+}
+.fa-mail-forward:before,
+.fa-share:before {
+ content: "\f064";
+}
+.fa-expand:before {
+ content: "\f065";
+}
+.fa-compress:before {
+ content: "\f066";
+}
+.fa-plus:before {
+ content: "\f067";
+}
+.fa-minus:before {
+ content: "\f068";
+}
+.fa-asterisk:before {
+ content: "\f069";
+}
+.fa-exclamation-circle:before {
+ content: "\f06a";
+}
+.fa-gift:before {
+ content: "\f06b";
+}
+.fa-leaf:before {
+ content: "\f06c";
+}
+.fa-fire:before {
+ content: "\f06d";
+}
+.fa-eye:before {
+ content: "\f06e";
+}
+.fa-eye-slash:before {
+ content: "\f070";
+}
+.fa-warning:before,
+.fa-exclamation-triangle:before {
+ content: "\f071";
+}
+.fa-plane:before {
+ content: "\f072";
+}
+.fa-calendar:before {
+ content: "\f073";
+}
+.fa-random:before {
+ content: "\f074";
+}
+.fa-comment:before {
+ content: "\f075";
+}
+.fa-magnet:before {
+ content: "\f076";
+}
+.fa-chevron-up:before {
+ content: "\f077";
+}
+.fa-chevron-down:before {
+ content: "\f078";
+}
+.fa-retweet:before {
+ content: "\f079";
+}
+.fa-shopping-cart:before {
+ content: "\f07a";
+}
+.fa-folder:before {
+ content: "\f07b";
+}
+.fa-folder-open:before {
+ content: "\f07c";
+}
+.fa-arrows-v:before {
+ content: "\f07d";
+}
+.fa-arrows-h:before {
+ content: "\f07e";
+}
+.fa-bar-chart-o:before {
+ content: "\f080";
+}
+.fa-twitter-square:before {
+ content: "\f081";
+}
+.fa-facebook-square:before {
+ content: "\f082";
+}
+.fa-camera-retro:before {
+ content: "\f083";
+}
+.fa-key:before {
+ content: "\f084";
+}
+.fa-gears:before,
+.fa-cogs:before {
+ content: "\f085";
+}
+.fa-comments:before {
+ content: "\f086";
+}
+.fa-thumbs-o-up:before {
+ content: "\f087";
+}
+.fa-thumbs-o-down:before {
+ content: "\f088";
+}
+.fa-star-half:before {
+ content: "\f089";
+}
+.fa-heart-o:before {
+ content: "\f08a";
+}
+.fa-sign-out:before {
+ content: "\f08b";
+}
+.fa-linkedin-square:before {
+ content: "\f08c";
+}
+.fa-thumb-tack:before {
+ content: "\f08d";
+}
+.fa-external-link:before {
+ content: "\f08e";
+}
+.fa-sign-in:before {
+ content: "\f090";
+}
+.fa-trophy:before {
+ content: "\f091";
+}
+.fa-github-square:before {
+ content: "\f092";
+}
+.fa-upload:before {
+ content: "\f093";
+}
+.fa-lemon-o:before {
+ content: "\f094";
+}
+.fa-phone:before {
+ content: "\f095";
+}
+.fa-square-o:before {
+ content: "\f096";
+}
+.fa-bookmark-o:before {
+ content: "\f097";
+}
+.fa-phone-square:before {
+ content: "\f098";
+}
+.fa-twitter:before {
+ content: "\f099";
+}
+.fa-facebook:before {
+ content: "\f09a";
+}
+.fa-github:before {
+ content: "\f09b";
+}
+.fa-unlock:before {
+ content: "\f09c";
+}
+.fa-credit-card:before {
+ content: "\f09d";
+}
+.fa-rss:before {
+ content: "\f09e";
+}
+.fa-hdd-o:before {
+ content: "\f0a0";
+}
+.fa-bullhorn:before {
+ content: "\f0a1";
+}
+.fa-bell:before {
+ content: "\f0f3";
+}
+.fa-certificate:before {
+ content: "\f0a3";
+}
+.fa-hand-o-right:before {
+ content: "\f0a4";
+}
+.fa-hand-o-left:before {
+ content: "\f0a5";
+}
+.fa-hand-o-up:before {
+ content: "\f0a6";
+}
+.fa-hand-o-down:before {
+ content: "\f0a7";
+}
+.fa-arrow-circle-left:before {
+ content: "\f0a8";
+}
+.fa-arrow-circle-right:before {
+ content: "\f0a9";
+}
+.fa-arrow-circle-up:before {
+ content: "\f0aa";
+}
+.fa-arrow-circle-down:before {
+ content: "\f0ab";
+}
+.fa-globe:before {
+ content: "\f0ac";
+}
+.fa-wrench:before {
+ content: "\f0ad";
+}
+.fa-tasks:before {
+ content: "\f0ae";
+}
+.fa-filter:before {
+ content: "\f0b0";
+}
+.fa-briefcase:before {
+ content: "\f0b1";
+}
+.fa-arrows-alt:before {
+ content: "\f0b2";
+}
+.fa-group:before,
+.fa-users:before {
+ content: "\f0c0";
+}
+.fa-chain:before,
+.fa-link:before {
+ content: "\f0c1";
+}
+.fa-cloud:before {
+ content: "\f0c2";
+}
+.fa-flask:before {
+ content: "\f0c3";
+}
+.fa-cut:before,
+.fa-scissors:before {
+ content: "\f0c4";
+}
+.fa-copy:before,
+.fa-files-o:before {
+ content: "\f0c5";
+}
+.fa-paperclip:before {
+ content: "\f0c6";
+}
+.fa-save:before,
+.fa-floppy-o:before {
+ content: "\f0c7";
+}
+.fa-square:before {
+ content: "\f0c8";
+}
+.fa-bars:before {
+ content: "\f0c9";
+}
+.fa-list-ul:before {
+ content: "\f0ca";
+}
+.fa-list-ol:before {
+ content: "\f0cb";
+}
+.fa-strikethrough:before {
+ content: "\f0cc";
+}
+.fa-underline:before {
+ content: "\f0cd";
+}
+.fa-table:before {
+ content: "\f0ce";
+}
+.fa-magic:before {
+ content: "\f0d0";
+}
+.fa-truck:before {
+ content: "\f0d1";
+}
+.fa-pinterest:before {
+ content: "\f0d2";
+}
+.fa-pinterest-square:before {
+ content: "\f0d3";
+}
+.fa-google-plus-square:before {
+ content: "\f0d4";
+}
+.fa-google-plus:before {
+ content: "\f0d5";
+}
+.fa-money:before {
+ content: "\f0d6";
+}
+.fa-caret-down:before {
+ content: "\f0d7";
+}
+.fa-caret-up:before {
+ content: "\f0d8";
+}
+.fa-caret-left:before {
+ content: "\f0d9";
+}
+.fa-caret-right:before {
+ content: "\f0da";
+}
+.fa-columns:before {
+ content: "\f0db";
+}
+.fa-unsorted:before,
+.fa-sort:before {
+ content: "\f0dc";
+}
+.fa-sort-down:before,
+.fa-sort-asc:before {
+ content: "\f0dd";
+}
+.fa-sort-up:before,
+.fa-sort-desc:before {
+ content: "\f0de";
+}
+.fa-envelope:before {
+ content: "\f0e0";
+}
+.fa-linkedin:before {
+ content: "\f0e1";
+}
+.fa-rotate-left:before,
+.fa-undo:before {
+ content: "\f0e2";
+}
+.fa-legal:before,
+.fa-gavel:before {
+ content: "\f0e3";
+}
+.fa-dashboard:before,
+.fa-tachometer:before {
+ content: "\f0e4";
+}
+.fa-comment-o:before {
+ content: "\f0e5";
+}
+.fa-comments-o:before {
+ content: "\f0e6";
+}
+.fa-flash:before,
+.fa-bolt:before {
+ content: "\f0e7";
+}
+.fa-sitemap:before {
+ content: "\f0e8";
+}
+.fa-umbrella:before {
+ content: "\f0e9";
+}
+.fa-paste:before,
+.fa-clipboard:before {
+ content: "\f0ea";
+}
+.fa-lightbulb-o:before {
+ content: "\f0eb";
+}
+.fa-exchange:before {
+ content: "\f0ec";
+}
+.fa-cloud-download:before {
+ content: "\f0ed";
+}
+.fa-cloud-upload:before {
+ content: "\f0ee";
+}
+.fa-user-md:before {
+ content: "\f0f0";
+}
+.fa-stethoscope:before {
+ content: "\f0f1";
+}
+.fa-suitcase:before {
+ content: "\f0f2";
+}
+.fa-bell-o:before {
+ content: "\f0a2";
+}
+.fa-coffee:before {
+ content: "\f0f4";
+}
+.fa-cutlery:before {
+ content: "\f0f5";
+}
+.fa-file-text-o:before {
+ content: "\f0f6";
+}
+.fa-building-o:before {
+ content: "\f0f7";
+}
+.fa-hospital-o:before {
+ content: "\f0f8";
+}
+.fa-ambulance:before {
+ content: "\f0f9";
+}
+.fa-medkit:before {
+ content: "\f0fa";
+}
+.fa-fighter-jet:before {
+ content: "\f0fb";
+}
+.fa-beer:before {
+ content: "\f0fc";
+}
+.fa-h-square:before {
+ content: "\f0fd";
+}
+.fa-plus-square:before {
+ content: "\f0fe";
+}
+.fa-angle-double-left:before {
+ content: "\f100";
+}
+.fa-angle-double-right:before {
+ content: "\f101";
+}
+.fa-angle-double-up:before {
+ content: "\f102";
+}
+.fa-angle-double-down:before {
+ content: "\f103";
+}
+.fa-angle-left:before {
+ content: "\f104";
+}
+.fa-angle-right:before {
+ content: "\f105";
+}
+.fa-angle-up:before {
+ content: "\f106";
+}
+.fa-angle-down:before {
+ content: "\f107";
+}
+.fa-desktop:before {
+ content: "\f108";
+}
+.fa-laptop:before {
+ content: "\f109";
+}
+.fa-tablet:before {
+ content: "\f10a";
+}
+.fa-mobile-phone:before,
+.fa-mobile:before {
+ content: "\f10b";
+}
+.fa-circle-o:before {
+ content: "\f10c";
+}
+.fa-quote-left:before {
+ content: "\f10d";
+}
+.fa-quote-right:before {
+ content: "\f10e";
+}
+.fa-spinner:before {
+ content: "\f110";
+}
+.fa-circle:before {
+ content: "\f111";
+}
+.fa-mail-reply:before,
+.fa-reply:before {
+ content: "\f112";
+}
+.fa-github-alt:before {
+ content: "\f113";
+}
+.fa-folder-o:before {
+ content: "\f114";
+}
+.fa-folder-open-o:before {
+ content: "\f115";
+}
+.fa-smile-o:before {
+ content: "\f118";
+}
+.fa-frown-o:before {
+ content: "\f119";
+}
+.fa-meh-o:before {
+ content: "\f11a";
+}
+.fa-gamepad:before {
+ content: "\f11b";
+}
+.fa-keyboard-o:before {
+ content: "\f11c";
+}
+.fa-flag-o:before {
+ content: "\f11d";
+}
+.fa-flag-checkered:before {
+ content: "\f11e";
+}
+.fa-terminal:before {
+ content: "\f120";
+}
+.fa-code:before {
+ content: "\f121";
+}
+.fa-reply-all:before {
+ content: "\f122";
+}
+.fa-mail-reply-all:before {
+ content: "\f122";
+}
+.fa-star-half-empty:before,
+.fa-star-half-full:before,
+.fa-star-half-o:before {
+ content: "\f123";
+}
+.fa-location-arrow:before {
+ content: "\f124";
+}
+.fa-crop:before {
+ content: "\f125";
+}
+.fa-code-fork:before {
+ content: "\f126";
+}
+.fa-unlink:before,
+.fa-chain-broken:before {
+ content: "\f127";
+}
+.fa-question:before {
+ content: "\f128";
+}
+.fa-info:before {
+ content: "\f129";
+}
+.fa-exclamation:before {
+ content: "\f12a";
+}
+.fa-superscript:before {
+ content: "\f12b";
+}
+.fa-subscript:before {
+ content: "\f12c";
+}
+.fa-eraser:before {
+ content: "\f12d";
+}
+.fa-puzzle-piece:before {
+ content: "\f12e";
+}
+.fa-microphone:before {
+ content: "\f130";
+}
+.fa-microphone-slash:before {
+ content: "\f131";
+}
+.fa-shield:before {
+ content: "\f132";
+}
+.fa-calendar-o:before {
+ content: "\f133";
+}
+.fa-fire-extinguisher:before {
+ content: "\f134";
+}
+.fa-rocket:before {
+ content: "\f135";
+}
+.fa-maxcdn:before {
+ content: "\f136";
+}
+.fa-chevron-circle-left:before {
+ content: "\f137";
+}
+.fa-chevron-circle-right:before {
+ content: "\f138";
+}
+.fa-chevron-circle-up:before {
+ content: "\f139";
+}
+.fa-chevron-circle-down:before {
+ content: "\f13a";
+}
+.fa-html5:before {
+ content: "\f13b";
+}
+.fa-css3:before {
+ content: "\f13c";
+}
+.fa-anchor:before {
+ content: "\f13d";
+}
+.fa-unlock-alt:before {
+ content: "\f13e";
+}
+.fa-bullseye:before {
+ content: "\f140";
+}
+.fa-ellipsis-h:before {
+ content: "\f141";
+}
+.fa-ellipsis-v:before {
+ content: "\f142";
+}
+.fa-rss-square:before {
+ content: "\f143";
+}
+.fa-play-circle:before {
+ content: "\f144";
+}
+.fa-ticket:before {
+ content: "\f145";
+}
+.fa-minus-square:before {
+ content: "\f146";
+}
+.fa-minus-square-o:before {
+ content: "\f147";
+}
+.fa-level-up:before {
+ content: "\f148";
+}
+.fa-level-down:before {
+ content: "\f149";
+}
+.fa-check-square:before {
+ content: "\f14a";
+}
+.fa-pencil-square:before {
+ content: "\f14b";
+}
+.fa-external-link-square:before {
+ content: "\f14c";
+}
+.fa-share-square:before {
+ content: "\f14d";
+}
+.fa-compass:before {
+ content: "\f14e";
+}
+.fa-toggle-down:before,
+.fa-caret-square-o-down:before {
+ content: "\f150";
+}
+.fa-toggle-up:before,
+.fa-caret-square-o-up:before {
+ content: "\f151";
+}
+.fa-toggle-right:before,
+.fa-caret-square-o-right:before {
+ content: "\f152";
+}
+.fa-euro:before,
+.fa-eur:before {
+ content: "\f153";
+}
+.fa-gbp:before {
+ content: "\f154";
+}
+.fa-dollar:before,
+.fa-usd:before {
+ content: "\f155";
+}
+.fa-rupee:before,
+.fa-inr:before {
+ content: "\f156";
+}
+.fa-cny:before,
+.fa-rmb:before,
+.fa-yen:before,
+.fa-jpy:before {
+ content: "\f157";
+}
+.fa-ruble:before,
+.fa-rouble:before,
+.fa-rub:before {
+ content: "\f158";
+}
+.fa-won:before,
+.fa-krw:before {
+ content: "\f159";
+}
+.fa-bitcoin:before,
+.fa-btc:before {
+ content: "\f15a";
+}
+.fa-file:before {
+ content: "\f15b";
+}
+.fa-file-text:before {
+ content: "\f15c";
+}
+.fa-sort-alpha-asc:before {
+ content: "\f15d";
+}
+.fa-sort-alpha-desc:before {
+ content: "\f15e";
+}
+.fa-sort-amount-asc:before {
+ content: "\f160";
+}
+.fa-sort-amount-desc:before {
+ content: "\f161";
+}
+.fa-sort-numeric-asc:before {
+ content: "\f162";
+}
+.fa-sort-numeric-desc:before {
+ content: "\f163";
+}
+.fa-thumbs-up:before {
+ content: "\f164";
+}
+.fa-thumbs-down:before {
+ content: "\f165";
+}
+.fa-youtube-square:before {
+ content: "\f166";
+}
+.fa-youtube:before {
+ content: "\f167";
+}
+.fa-xing:before {
+ content: "\f168";
+}
+.fa-xing-square:before {
+ content: "\f169";
+}
+.fa-youtube-play:before {
+ content: "\f16a";
+}
+.fa-dropbox:before {
+ content: "\f16b";
+}
+.fa-stack-overflow:before {
+ content: "\f16c";
+}
+.fa-instagram:before {
+ content: "\f16d";
+}
+.fa-flickr:before {
+ content: "\f16e";
+}
+.fa-adn:before {
+ content: "\f170";
+}
+.fa-bitbucket:before {
+ content: "\f171";
+}
+.fa-bitbucket-square:before {
+ content: "\f172";
+}
+.fa-tumblr:before {
+ content: "\f173";
+}
+.fa-tumblr-square:before {
+ content: "\f174";
+}
+.fa-long-arrow-down:before {
+ content: "\f175";
+}
+.fa-long-arrow-up:before {
+ content: "\f176";
+}
+.fa-long-arrow-left:before {
+ content: "\f177";
+}
+.fa-long-arrow-right:before {
+ content: "\f178";
+}
+.fa-apple:before {
+ content: "\f179";
+}
+.fa-windows:before {
+ content: "\f17a";
+}
+.fa-android:before {
+ content: "\f17b";
+}
+.fa-linux:before {
+ content: "\f17c";
+}
+.fa-dribbble:before {
+ content: "\f17d";
+}
+.fa-skype:before {
+ content: "\f17e";
+}
+.fa-foursquare:before {
+ content: "\f180";
+}
+.fa-trello:before {
+ content: "\f181";
+}
+.fa-female:before {
+ content: "\f182";
+}
+.fa-male:before {
+ content: "\f183";
+}
+.fa-gittip:before {
+ content: "\f184";
+}
+.fa-sun-o:before {
+ content: "\f185";
+}
+.fa-moon-o:before {
+ content: "\f186";
+}
+.fa-archive:before {
+ content: "\f187";
+}
+.fa-bug:before {
+ content: "\f188";
+}
+.fa-vk:before {
+ content: "\f189";
+}
+.fa-weibo:before {
+ content: "\f18a";
+}
+.fa-renren:before {
+ content: "\f18b";
+}
+.fa-pagelines:before {
+ content: "\f18c";
+}
+.fa-stack-exchange:before {
+ content: "\f18d";
+}
+.fa-arrow-circle-o-right:before {
+ content: "\f18e";
+}
+.fa-arrow-circle-o-left:before {
+ content: "\f190";
+}
+.fa-toggle-left:before,
+.fa-caret-square-o-left:before {
+ content: "\f191";
+}
+.fa-dot-circle-o:before {
+ content: "\f192";
+}
+.fa-wheelchair:before {
+ content: "\f193";
+}
+.fa-vimeo-square:before {
+ content: "\f194";
+}
+.fa-turkish-lira:before,
+.fa-try:before {
+ content: "\f195";
+}
+.fa-plus-square-o:before {
+ content: "\f196";
+}
diff --git a/doc/website-v1/css/font-awesome.min.css b/doc/website-v1/css/font-awesome.min.css
new file mode 100644
index 0000000..449d6ac
--- /dev/null
+++ b/doc/website-v1/css/font-awesome.min.css
@@ -0,0 +1,4 @@
+/*!
+ * Font Awesome 4.0.3 by @davegandy - http://fontawesome.io - @fontawesome
+ * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
+ */@font-face{font-family:'FontAwesome';src:url('../fonts/fontawesome-webfont.eot?v=4.0.3');src:url('../fonts/fontawesome-webfont.eot?#iefix&v=4.0.3') format('embedded-opentype'),url('../fonts/fontawesome-webfont.woff?v=4.0.3') format('woff'),url('../fonts/fontawesome-webfont.ttf?v=4.0.3') format('truetype'),url('../fonts/fontawesome-webfont.svg?v=4.0.3#fontawesomeregular') format('svg');font-weight:normal;font-style:normal}.fa{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.3333333333333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.2857142857142858em;text-align:center}.fa-ul{padding-left:0;margin-left:2.142857142857143em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.142857142857143em;width:2.142857142857143em;top:.14285714285714285em;text-align:center}.fa-li.fa-lg{left:-1.8571428571428572em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:spin 2s infinite linear;-moz-animation:spin 2s infinite linear;-o-animation:spin 2s infinite linear;animation:spin 2s infinite linear}@-moz-keyframes spin{0%{-moz-transform:rotate(0deg)}100%{-moz-transform:rotate(359deg)}}@-webkit-keyframes spin{0%{-webkit-transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg)}}@-o-keyframes spin{0%{-o-transform:rotate(0deg)}100%{-o-transform:rotate(359deg)}}@-ms-keyframes spin{0%{-ms-transform:rotate(0deg)}100%{-ms-transform:rotate(359deg)}}@keyframes 
spin{0%{transform:rotate(0deg)}100%{transform:rotate(359deg)}}.fa-rotate-90{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1);-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2);-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=3);-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=0,mirror=1);-webkit-transform:scale(-1,1);-moz-transform:scale(-1,1);-ms-transform:scale(-1,1);-o-transform:scale(-1,1);transform:scale(-1,1)}.fa-flip-vertical{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2,mirror=1);-webkit-transform:scale(1,-1);-moz-transform:scale(1,-1);-ms-transform:scale(1,-1);-o-transform:scale(1,-1);transform:scale(1,-1)}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\f000"}.fa-music:before{content:"\f001"}.fa-search:before{content:"\f002"}.fa-envelope-o:before{content:"\f003"}.fa-heart:before{content:"\f004"}.fa-star:before{content:"\f005"}.fa-star-o:before{content:"\f006"}.fa-user:before{content:"\f007"}.fa-film:before{content:"\f008"}.fa-th-large:before{content:"\f009"}.fa-th:before{content:"\f00a"}.fa-th-list:before{content:"\f00b"}.fa-check:before{content:"\f00c"}.fa-times:before{content:"\f00d"}.fa-search-plus:before{content:"\f00e"}.fa-search-minus:before{content:"\f010"}.fa-power-off:before{content:"\f011"}.fa-signal:before{content:"\f012"}.fa-gear:before,.fa-cog:before{content:"\f013"}.fa-trash-o:before{content:"\f014"}.fa-home:before{content:"\f015"}.fa-file-o:before{content:"\f016"}.fa-clock-o:before{content:"\f017"}.fa-road:before{content:"\f018"}.fa-download:before{content:"\f019"}.fa-arrow-circle-o-down:before{content:"\f01a"}.fa-arrow-circle-o-up:before{content:"\f01b"}.fa-inbox:before{content:"\f01c"}.fa-play-circle-o:before{content:"\f01d"}.fa-rotate-right:before,.fa-repeat:before{content:"\f01e"}.fa-refresh:before{content:"\f021"}.fa-list-alt:before{content:"\f022"}.fa-lock:before{content:"\f023"}.fa-flag:before{content:"\f024"}.fa-headphones:before{content:"\f025"}.fa-volume-off:before{content:"\f026"}.fa-volume-down:before{content:"\f027"}.fa-volume-up:before{content:"\f028"}.fa-qrcode:before{content:"\f029"}.fa-barcode:before{content:"\f02a"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-book:before{content:"\f02d"}.fa-bookmark:before{content:"\f02e"}.fa-print:before{content:"\f02f"}.fa-camera:before{content:"\f030"}.fa-font:before{content:"\f031"}.fa-bold:before{content:"\f032"}.fa-italic:before{content:"\f033"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-align-left:before{content:"\f036"}.fa-align-center:before{content:"\f037"}.fa-align-right:before{content:"\f038"}.fa-align-justify:before{content:"\f039"}.fa-list:before{content:"\f03a"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-indent:before{content:"\f03c"}.fa-video-cam
era:before{content:"\f03d"}.fa-picture-o:before{content:"\f03e"}.fa-pencil:before{content:"\f040"}.fa-map-marker:before{content:"\f041"}.fa-adjust:before{content:"\f042"}.fa-tint:before{content:"\f043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\f044"}.fa-share-square-o:before{content:"\f045"}.fa-check-square-o:before{content:"\f046"}.fa-arrows:before{content:"\f047"}.fa-step-backward:before{content:"\f048"}.fa-fast-backward:before{content:"\f049"}.fa-backward:before{content:"\f04a"}.fa-play:before{content:"\f04b"}.fa-pause:before{content:"\f04c"}.fa-stop:before{content:"\f04d"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before{content:"\f050"}.fa-step-forward:before{content:"\f051"}.fa-eject:before{content:"\f052"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-plus-circle:before{content:"\f055"}.fa-minus-circle:before{content:"\f056"}.fa-times-circle:before{content:"\f057"}.fa-check-circle:before{content:"\f058"}.fa-question-circle:before{content:"\f059"}.fa-info-circle:before{content:"\f05a"}.fa-crosshairs:before{content:"\f05b"}.fa-times-circle-o:before{content:"\f05c"}.fa-check-circle-o:before{content:"\f05d"}.fa-ban:before{content:"\f05e"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-down:before{content:"\f063"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-expand:before{content:"\f065"}.fa-compress:before{content:"\f066"}.fa-plus:before{content:"\f067"}.fa-minus:before{content:"\f068"}.fa-asterisk:before{content:"\f069"}.fa-exclamation-circle:before{content:"\f06a"}.fa-gift:before{content:"\f06b"}.fa-leaf:before{content:"\f06c"}.fa-fire:before{content:"\f06d"}.fa-eye:before{content:"\f06e"}.fa-eye-slash:before{content:"\f070"}.fa-warning:before,.fa-exclamation-triangle:before{content:"\f071"}.fa-plane:before{content:"\f072"}.fa-calendar:before{content:"\f073"}.fa-random:before{content:"\f074"}.fa-comment:before{content:"\f075"}.fa-magnet:before{content:"\f076"}.fa-chevron-up:before{content:"\f077"}.fa-chevron-down:before{content:"\f078"}.fa-retweet:before{content:"\f079"}.fa-shopping-cart:before{content:"\f07a"}.fa-folder:before{content:"\f07b"}.fa-folder-open:before{content:"\f07c"}.fa-arrows-v:before{content:"\f07d"}.fa-arrows-h:before{content:"\f07e"}.fa-bar-chart-o:before{content:"\f080"}.fa-twitter-square:before{content:"\f081"}.fa-facebook-square:before{content:"\f082"}.fa-camera-retro:before{content:"\f083"}.fa-key:before{content:"\f084"}.fa-gears:before,.fa-cogs:before{content:"\f085"}.fa-comments:before{content:"\f086"}.fa-thumbs-o-up:before{content:"\f087"}.fa-thumbs-o-down:before{content:"\f088"}.fa-star-half:before{content:"\f089"}.fa-heart-o:before{content:"\f08a"}.fa-sign-out:before{content:"\f08b"}.fa-linkedin-square:before{content:"\f08c"}.fa-thumb-tack:before{content:"\f08d"}.fa-external-link:before{content:"\f08e"}.fa-sign-in:before{content:"\f090"}.fa-trophy:before{content:"\f091"}.fa-github-square:before{content:"\f092"}.fa-upload:before{content:"\f093"}.fa-lemon-o:before{content:"\f094"}.fa-phone:before{content:"\f095"}.fa-square-o:before{content:"\f096"}.fa-bookmark-o:before{content:"\f097"}.fa-phone-square:before{content:"\f098"}.fa-twitter:before{content:"\f099"}.fa-facebook:before{content:"\f09a"}.fa-github:before{content:"\f09b"}.fa-unlock:before{content:"\f09c"}.fa-credit-card:before{content:"\f09d"}.fa-rss:before{content:"\f09e"}.fa-hdd-o:before{content:"\f0a0"}.fa-bullhorn:before{content:"\f0a1"}.fa-bell:before{c
ontent:"\f0f3"}.fa-certificate:before{content:"\f0a3"}.fa-hand-o-right:before{content:"\f0a4"}.fa-hand-o-left:before{content:"\f0a5"}.fa-hand-o-up:before{content:"\f0a6"}.fa-hand-o-down:before{content:"\f0a7"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-globe:before{content:"\f0ac"}.fa-wrench:before{content:"\f0ad"}.fa-tasks:before{content:"\f0ae"}.fa-filter:before{content:"\f0b0"}.fa-briefcase:before{content:"\f0b1"}.fa-arrows-alt:before{content:"\f0b2"}.fa-group:before,.fa-users:before{content:"\f0c0"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-cloud:before{content:"\f0c2"}.fa-flask:before{content:"\f0c3"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-copy:before,.fa-files-o:before{content:"\f0c5"}.fa-paperclip:before{content:"\f0c6"}.fa-save:before,.fa-floppy-o:before{content:"\f0c7"}.fa-square:before{content:"\f0c8"}.fa-bars:before{content:"\f0c9"}.fa-list-ul:before{content:"\f0ca"}.fa-list-ol:before{content:"\f0cb"}.fa-strikethrough:before{content:"\f0cc"}.fa-underline:before{content:"\f0cd"}.fa-table:before{content:"\f0ce"}.fa-magic:before{content:"\f0d0"}.fa-truck:before{content:"\f0d1"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-square:before{content:"\f0d3"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-plus:before{content:"\f0d5"}.fa-money:before{content:"\f0d6"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-up:before{content:"\f0d8"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-columns:before{content:"\f0db"}.fa-unsorted:before,.fa-sort:before{content:"\f0dc"}.fa-sort-down:before,.fa-sort-asc:before{content:"\f0dd"}.fa-sort-up:before,.fa-sort-desc:before{content:"\f0de"}.fa-envelope:before{content:"\f0e0"}.fa-linkedin:before{content:"\f0e1"}.fa-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-legal:before,.fa-gavel:before{content:"\f0e3"}.fa-dashboard:before,.fa-tachometer:before{content:"\f0e4"}.fa-comment-o:before{content:"\f0e5"}.fa-comments-o:before{content:"\f0e6"}.fa-flash:before,.fa-bolt:before{content:"\f0e7"}.fa-sitemap:before{content:"\f0e8"}.fa-umbrella:before{content:"\f0e9"}.fa-paste:before,.fa-clipboard:before{content:"\f0ea"}.fa-lightbulb-o:before{content:"\f0eb"}.fa-exchange:before{content:"\f0ec"}.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-upload:before{content:"\f0ee"}.fa-user-md:before{content:"\f0f0"}.fa-stethoscope:before{content:"\f0f1"}.fa-suitcase:before{content:"\f0f2"}.fa-bell-o:before{content:"\f0a2"}.fa-coffee:before{content:"\f0f4"}.fa-cutlery:before{content:"\f0f5"}.fa-file-text-o:before{content:"\f0f6"}.fa-building-o:before{content:"\f0f7"}.fa-hospital-o:before{content:"\f0f8"}.fa-ambulance:before{content:"\f0f9"}.fa-medkit:before{content:"\f0fa"}.fa-fighter-jet:before{content:"\f0fb"}.fa-beer:before{content:"\f0fc"}.fa-h-square:before{content:"\f0fd"}.fa-plus-square:before{content:"\f0fe"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-down:before{content:"\f107"}.fa-desktop:before{content:"\f108"}.fa-laptop:before{content:"\f109"}.fa-tablet:before{content:"\f10a"}.fa-mobile-phone:before,.fa-mobile:before{content:"\f10b"}.fa-circle-o:before{content:"\f10c"}.fa-quote-left:before{con
tent:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-spinner:before{content:"\f110"}.fa-circle:before{content:"\f111"}.fa-mail-reply:before,.fa-reply:before{content:"\f112"}.fa-github-alt:before{content:"\f113"}.fa-folder-o:before{content:"\f114"}.fa-folder-open-o:before{content:"\f115"}.fa-smile-o:before{content:"\f118"}.fa-frown-o:before{content:"\f119"}.fa-meh-o:before{content:"\f11a"}.fa-gamepad:before{content:"\f11b"}.fa-keyboard-o:before{content:"\f11c"}.fa-flag-o:before{content:"\f11d"}.fa-flag-checkered:before{content:"\f11e"}.fa-terminal:before{content:"\f120"}.fa-code:before{content:"\f121"}.fa-reply-all:before{content:"\f122"}.fa-mail-reply-all:before{content:"\f122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\f123"}.fa-location-arrow:before{content:"\f124"}.fa-crop:before{content:"\f125"}.fa-code-fork:before{content:"\f126"}.fa-unlink:before,.fa-chain-broken:before{content:"\f127"}.fa-question:before{content:"\f128"}.fa-info:before{content:"\f129"}.fa-exclamation:before{content:"\f12a"}.fa-superscript:before{content:"\f12b"}.fa-subscript:before{content:"\f12c"}.fa-eraser:before{content:"\f12d"}.fa-puzzle-piece:before{content:"\f12e"}.fa-microphone:before{content:"\f130"}.fa-microphone-slash:before{content:"\f131"}.fa-shield:before{content:"\f132"}.fa-calendar-o:before{content:"\f133"}.fa-fire-extinguisher:before{content:"\f134"}.fa-rocket:before{content:"\f135"}.fa-maxcdn:before{content:"\f136"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-html5:before{content:"\f13b"}.fa-css3:before{content:"\f13c"}.fa-anchor:before{content:"\f13d"}.fa-unlock-alt:before{content:"\f13e"}.fa-bullseye:before{content:"\f140"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-rss-square:before{content:"\f143"}.fa-play-circle:before{content:"\f144"}.fa-ticket:before{content:"\f145"}.fa-minus-square:before{content:"\f146"}.fa-minus-square-o:before{content:"\f147"}.fa-level-up:before{content:"\f148"}.fa-level-down:before{content:"\f149"}.fa-check-square:before{content:"\f14a"}.fa-pencil-square:before{content:"\f14b"}.fa-external-link-square:before{content:"\f14c"}.fa-share-square:before{content:"\f14d"}.fa-compass:before{content:"\f14e"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:"\f150"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:"\f151"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:"\f152"}.fa-euro:before,.fa-eur:before{content:"\f153"}.fa-gbp:before{content:"\f154"}.fa-dollar:before,.fa-usd:before{content:"\f155"}.fa-rupee:before,.fa-inr:before{content:"\f156"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:"\f157"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:"\f158"}.fa-won:before,.fa-krw:before{content:"\f159"}.fa-bitcoin:before,.fa-btc:before{content:"\f15a"}.fa-file:before{content:"\f15b"}.fa-file-text:before{content:"\f15c"}.fa-sort-alpha-asc:before{content:"\f15d"}.fa-sort-alpha-desc:before{content:"\f15e"}.fa-sort-amount-asc:before{content:"\f160"}.fa-sort-amount-desc:before{content:"\f161"}.fa-sort-numeric-asc:before{content:"\f162"}.fa-sort-numeric-desc:before{content:"\f163"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbs-down:before{content:"\f165"}.fa-youtube-square:before{content:"\f166"}.fa-youtube:before{content:"\f167"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169
"}.fa-youtube-play:before{content:"\f16a"}.fa-dropbox:before{content:"\f16b"}.fa-stack-overflow:before{content:"\f16c"}.fa-instagram:before{content:"\f16d"}.fa-flickr:before{content:"\f16e"}.fa-adn:before{content:"\f170"}.fa-bitbucket:before{content:"\f171"}.fa-bitbucket-square:before{content:"\f172"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-long-arrow-down:before{content:"\f175"}.fa-long-arrow-up:before{content:"\f176"}.fa-long-arrow-left:before{content:"\f177"}.fa-long-arrow-right:before{content:"\f178"}.fa-apple:before{content:"\f179"}.fa-windows:before{content:"\f17a"}.fa-android:before{content:"\f17b"}.fa-linux:before{content:"\f17c"}.fa-dribbble:before{content:"\f17d"}.fa-skype:before{content:"\f17e"}.fa-foursquare:before{content:"\f180"}.fa-trello:before{content:"\f181"}.fa-female:before{content:"\f182"}.fa-male:before{content:"\f183"}.fa-gittip:before{content:"\f184"}.fa-sun-o:before{content:"\f185"}.fa-moon-o:before{content:"\f186"}.fa-archive:before{content:"\f187"}.fa-bug:before{content:"\f188"}.fa-vk:before{content:"\f189"}.fa-weibo:before{content:"\f18a"}.fa-renren:before{content:"\f18b"}.fa-pagelines:before{content:"\f18c"}.fa-stack-exchange:before{content:"\f18d"}.fa-arrow-circle-o-right:before{content:"\f18e"}.fa-arrow-circle-o-left:before{content:"\f190"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:"\f191"}.fa-dot-circle-o:before{content:"\f192"}.fa-wheelchair:before{content:"\f193"}.fa-vimeo-square:before{content:"\f194"}.fa-turkish-lira:before,.fa-try:before{content:"\f195"}.fa-plus-square-o:before{content:"\f196"} \ No newline at end of file
diff --git a/doc/website-v1/development.adoc b/doc/website-v1/development.adoc
new file mode 100644
index 0000000..a334a6b
--- /dev/null
+++ b/doc/website-v1/development.adoc
@@ -0,0 +1,74 @@
+= Development =
+
+== Tools ==
+
+++++
+<ul class="nav">
+<li><a href="https://github.com/ClusterLabs/crmsh"><i class="fa fa-code-fork fa-3x fa-fw"></i> Source Repository</a></li>
+<li><a href="http://clusterlabs.org/mailman/listinfo/users"><i class="fa fa-envelope fa-3x fa-fw"></i> Mailing List</a></li>
+<li><a href="https://github.com/ClusterLabs/crmsh/issues"><i class="fa fa-bug fa-3x fa-fw"></i> Issue Tracker</a></li>
+<li><a href="irc://freenode.net/#clusterlabs"><i class="fa fa-comments fa-3x fa-fw"></i> IRC: #clusterlabs on Freenode</a></li>
+<li><a href="https://github.com/ClusterLabs/crmsh/commits/master.atom"><i class="fa fa-rss fa-3x fa-fw"></i> Atom feed</a></li>
+</ul>
+++++
+
+== Source Code ==
+
+The source code for `crmsh` is kept in a
+http://git-scm.com/[git] repository
+hosted at https://github.com[GitHub]. Use +git+ to get a working copy:
+
+----
+git clone https://github.com/ClusterLabs/crmsh.git
+----
+
+Dependencies
+~~~~~~~~~~~~
+
+Building and installing crmsh requires Python 2.6 or later (Python 3 is not yet supported).
+
+Additionally, the following Python modules are needed:
+
+* `lxml`
+* `PyYAML`
+* `setuptools`
+* `parallax`
+* `python-dateutil`
+
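+If your distribution does not package some of these, one way to pull them
+in is via +pip+ (a sketch; the PyPI package names are assumed to match):
+
+----
+pip install lxml PyYAML setuptools parallax python-dateutil
+----
+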
+Building
+~~~~~~~~
+
+`crmsh` uses the autotools suite to manage the build process.
+
+----
+./autogen.sh
+./configure
+make
+make install
+----
+
+=== Tests ===
+
+The unit tests for `crmsh` require +nose+ to run. On most distributions, it is
+available as the +python-nose+ package; it can also be installed with +pip+.
+
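+A minimal sketch of the +pip+ route:
+
+----
+pip install nose
+----
+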
+To run the unit test suite, go to the source code directory of `crmsh`
+and call:
+
+----
+./test/run
+----
+
+`crmsh` also comes with a comprehensive regression test suite. The regression tests must
+be run after installation, on a system which has both crmsh and pacemaker installed. You
+will also need the +pacemaker+ and +cluster-glue+ development headers:
+
+* link:http://hg.linux-ha.org/glue[cluster-glue]
+* link:https://github.com/ClusterLabs/pacemaker[pacemaker]
+
+To execute the tests, call:
+
+----
+/usr/share/crmsh/tests/regression.sh
+cat crmtestout/regression.out
+----
diff --git a/doc/website-v1/documentation.adoc b/doc/website-v1/documentation.adoc
new file mode 100644
index 0000000..dce5a0d
--- /dev/null
+++ b/doc/website-v1/documentation.adoc
@@ -0,0 +1,42 @@
+= Documentation =
+
+The main documentation for `crmsh` comes in the form of the
+`manual`, which provides the same help texts that are available
+via the `help` command in the interactive shell.
+
+Additionally, there are a couple of guides and other documents
+that will hopefully make using the shell as easy as possible.
+
+== Manual ==
+
+* link:/man[Manual (Development)]
+* link:/man-4.3[Manual (v4.3.x)]
+* link:/man-3[Manual (v3.x)]
+* link:/man-2.0[Manual (v2.x)]
+* link:/man-1.2[Manual (v1.2.x)]
+
+== Guides ==
+
+* link:/start-guide[Getting Started]
+* link:/history-guide[History Guide]
+* link:/rsctest-guide[Resource Testing Guide]
+* link:/configuration[Configuration]
+* link:/scripts[Cluster scripts]
+* link:/faq[Frequently Asked Questions]
+
+== Translations ==
+
+* https://blog.3ware.co.jp/2015/05/crmsh-getting-started/[Getting Started (Japanese)]
+
+== External documentation ==
+
+The SUSE
+https://www.suse.com/documentation/sle_ha/book_sleha/?page=/documentation/sle_ha/book_sleha/data/book_sleha.html[High
+Availability Guide] describes how to install and configure a
+complete cluster solution, including both the `crm` shell and Hawk,
+the web GUI which uses the `crm` shell as its backend.
+
+For more information on Pacemaker in general, see the
+http://clusterlabs.org/doc/[Pacemaker documentation] at `clusterlabs.org`.
+
diff --git a/doc/website-v1/download.adoc b/doc/website-v1/download.adoc
new file mode 100644
index 0000000..8d81153
--- /dev/null
+++ b/doc/website-v1/download.adoc
@@ -0,0 +1,40 @@
+= Download =
+
+The easiest way to install `crmsh` is via the package manager of your distribution.
+
+== SLES / openSUSE ==
+
+`crmsh` is commercially supported on SLE via the https://www.suse.com/products/highavailability/[SUSE Linux Enterprise High Availability Extension]. It is also available for openSUSE with the package name `crmsh`. Development packages can be downloaded from the OBS:
+
+* https://build.opensuse.org/package/show/network:ha-clustering:Stable/crmsh[Stable version]
+* https://build.opensuse.org/package/show/network:ha-clustering:Factory/crmsh[Development version]
+
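+On openSUSE, for example, installation from the configured repositories is a
+one-liner (a sketch):
+
+----
+zypper install crmsh
+----
+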
+== Red Hat / CentOS / Fedora ==
+
+We try to build Red Hat / CentOS / Fedora-compatible RPM packages on the OBS (see above).
+
+=== CentOS 7 ===
+
+CentOS 7 ships +yum+ rather than +dnf+; add the repository using
+the +yum-config-manager+ tool from the +yum-utils+ package:
+
+----
+yum-config-manager --add-repo http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/network:ha-clustering:Stable.repo
+----
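+
+With the repository in place, installing is then a one-liner (a sketch):
+
+----
+yum install crmsh
+----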
+
+== Debian ==
+
+The versions of `crmsh` and `pacemaker` currently available in the latest Debian release are quite old. Newer packages are available via the Debian-HA team https://wiki.debian.org/Debian-HA[wiki], and the distribution packages will hopefully be updated soon.
+
+== Ubuntu ==
+
+Packages for `crmsh` are available from https://launchpad.net/ubuntu/+source/crmsh[Launchpad].
+
+== Gentoo ==
+
+A fairly up-to-date version is available https://packages.gentoo.org/packages/sys-cluster/crmsh[here].
+
+== Arch ==
+
+`crmsh` is available via the https://aur.archlinux.org/packages/ha-pacemaker-crmsh/[AUR]. Unfortunately, the package seems somewhat out of date.
+
+== Source Packages ==
+
+Releases are available as `.tar.gz` or `.zip` archives via https://github.com/ClusterLabs/crmsh/releases[GitHub].
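+
+A release archive builds like any source checkout (a sketch; the version
+number is only a placeholder):
+
+----
+tar xzf crmsh-<version>.tar.gz
+cd crmsh-<version>
+./autogen.sh && ./configure && make && make install
+----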
diff --git a/doc/website-v1/faq.adoc b/doc/website-v1/faq.adoc
new file mode 100644
index 0000000..c9c5d02
--- /dev/null
+++ b/doc/website-v1/faq.adoc
@@ -0,0 +1,60 @@
+= Frequently Asked Questions
+
+== What is the crm shell?
+
+The `crm` shell is a command-line interface to the Pacemaker cluster
+resource management stack. If that doesn't make any sense to you, the
+easiest way to get up to speed is to go to the
+http://clusterlabs.org/[Pacemaker] website and read more about what it
+does.
+
+The `crm` shell provides a simpler interface to configuring Pacemaker
+than manipulating the XML of the CIB (Cluster Information Base)
+directly. With its command-line style interface, changes to the
+cluster can be performed quickly and painlessly. It also works as a
+scripting tool, allowing more complicated changes to be applied to the
+cluster.
+
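+For example, a single command is enough to add a new resource to the
+cluster configuration (a sketch; the resource name and IP address are
+placeholders):
+
+----
+crm configure primitive vip ocf:heartbeat:IPaddr2 params ip=192.168.1.100
+----
+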
+The `crm` shell also functions as a management console, providing a
+unified interface to a number of other auxiliary tools related to
+Pacemaker and cluster management.
+
+== What distributions does the shell run on?
+
+Many distributions provide packages for the `crm` shell in their
+respective package repositories. The best and safest way to obtain the
+`crm` shell is via the distribution repositories, so look there first.
+
+The intention is for the `crm` shell to work well on all the major
+distributions. Pre-built packages are provided for the
+following distros:
+
+ * openSUSE
+ * Fedora
+ * CentOS
+ * Red Hat Enterprise Linux
+
+More information can be found on the
+link:/documentation#_installation[Documentation] page.
+
+== Didn't crm use to be part of Pacemaker?
+
+Yes, initially, the `crm` shell was distributed as part of the
+Pacemaker project. It was split into its own, separate project in
+2011.
+
+A common misconception is that `crm` has been replaced by `pcs`
+(available at https://github.com/feist/pcs[github.com/feist/pcs]). `pcs`
+is an alternative command-line interface similar to `crm`. Both
+projects are being actively developed, with slightly different
+goals. Our recommendation is to use whatever shell your distribution
+of choice comes with and supports, unless you have a particular
+preference or are on a distribution which doesn't bundle either. In
+that case, we are obviously biased towards one of the available
+choices. ;)
+
+== The command line is all well and good, but is there a web interface?
+
+Yes! Take a look at https://github.com/ClusterLabs/hawk[Hawk].
+
+Hawk uses the `crm` shell as its backend to interact with the cluster.
diff --git a/doc/website-v1/fonts/FontAwesome.otf b/doc/website-v1/fonts/FontAwesome.otf
new file mode 100644
index 0000000..8b0f54e
--- /dev/null
+++ b/doc/website-v1/fonts/FontAwesome.otf
Binary files differ
diff --git a/doc/website-v1/fonts/fontawesome-webfont.eot b/doc/website-v1/fonts/fontawesome-webfont.eot
new file mode 100755
index 0000000..7c79c6a
--- /dev/null
+++ b/doc/website-v1/fonts/fontawesome-webfont.eot
Binary files differ
diff --git a/doc/website-v1/fonts/fontawesome-webfont.svg b/doc/website-v1/fonts/fontawesome-webfont.svg
new file mode 100755
index 0000000..45fdf33
--- /dev/null
+++ b/doc/website-v1/fonts/fontawesome-webfont.svg
@@ -0,0 +1,414 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
+<svg xmlns="http://www.w3.org/2000/svg">
+<metadata></metadata>
+<defs>
+<font id="fontawesomeregular" horiz-adv-x="1536" >
+<font-face units-per-em="1792" ascent="1536" descent="-256" />
+<missing-glyph horiz-adv-x="448" />
+<glyph unicode=" " horiz-adv-x="448" />
+<glyph unicode="&#x09;" horiz-adv-x="448" />
+<glyph unicode="&#xa0;" horiz-adv-x="448" />
+<glyph unicode="&#xa8;" horiz-adv-x="1792" />
+<glyph unicode="&#xa9;" horiz-adv-x="1792" />
+<glyph unicode="&#xae;" horiz-adv-x="1792" />
+<glyph unicode="&#xb4;" horiz-adv-x="1792" />
+<glyph unicode="&#xc6;" horiz-adv-x="1792" />
+<glyph unicode="&#x2000;" horiz-adv-x="768" />
+<glyph unicode="&#x2001;" />
+<glyph unicode="&#x2002;" horiz-adv-x="768" />
+<glyph unicode="&#x2003;" />
+<glyph unicode="&#x2004;" horiz-adv-x="512" />
+<glyph unicode="&#x2005;" horiz-adv-x="384" />
+<glyph unicode="&#x2006;" horiz-adv-x="256" />
+<glyph unicode="&#x2007;" horiz-adv-x="256" />
+<glyph unicode="&#x2008;" horiz-adv-x="192" />
+<glyph unicode="&#x2009;" horiz-adv-x="307" />
+<glyph unicode="&#x200a;" horiz-adv-x="85" />
+<glyph unicode="&#x202f;" horiz-adv-x="307" />
+<glyph unicode="&#x205f;" horiz-adv-x="384" />
+<glyph unicode="&#x2122;" horiz-adv-x="1792" />
+<glyph unicode="&#x221e;" horiz-adv-x="1792" />
+<glyph unicode="&#x2260;" horiz-adv-x="1792" />
+<glyph unicode="&#xe000;" horiz-adv-x="500" d="M0 0z" />
+<glyph unicode="&#xf000;" horiz-adv-x="1792" d="M1699 1350q0 -35 -43 -78l-632 -632v-768h320q26 0 45 -19t19 -45t-19 -45t-45 -19h-896q-26 0 -45 19t-19 45t19 45t45 19h320v768l-632 632q-43 43 -43 78q0 23 18 36.5t38 17.5t43 4h1408q23 0 43 -4t38 -17.5t18 -36.5z" />
+<glyph unicode="&#xf001;" d="M1536 1312v-1120q0 -50 -34 -89t-86 -60.5t-103.5 -32t-96.5 -10.5t-96.5 10.5t-103.5 32t-86 60.5t-34 89t34 89t86 60.5t103.5 32t96.5 10.5q105 0 192 -39v537l-768 -237v-709q0 -50 -34 -89t-86 -60.5t-103.5 -32t-96.5 -10.5t-96.5 10.5t-103.5 32t-86 60.5t-34 89 t34 89t86 60.5t103.5 32t96.5 10.5q105 0 192 -39v967q0 31 19 56.5t49 35.5l832 256q12 4 28 4q40 0 68 -28t28 -68z" />
+<glyph unicode="&#xf002;" horiz-adv-x="1664" d="M1152 704q0 185 -131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5t131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5zM1664 -128q0 -52 -38 -90t-90 -38q-54 0 -90 38l-343 342q-179 -124 -399 -124q-143 0 -273.5 55.5t-225 150t-150 225t-55.5 273.5 t55.5 273.5t150 225t225 150t273.5 55.5t273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -220 -124 -399l343 -343q37 -37 37 -90z" />
+<glyph unicode="&#xf003;" horiz-adv-x="1792" d="M1664 32v768q-32 -36 -69 -66q-268 -206 -426 -338q-51 -43 -83 -67t-86.5 -48.5t-102.5 -24.5h-1h-1q-48 0 -102.5 24.5t-86.5 48.5t-83 67q-158 132 -426 338q-37 30 -69 66v-768q0 -13 9.5 -22.5t22.5 -9.5h1472q13 0 22.5 9.5t9.5 22.5zM1664 1083v11v13.5t-0.5 13 t-3 12.5t-5.5 9t-9 7.5t-14 2.5h-1472q-13 0 -22.5 -9.5t-9.5 -22.5q0 -168 147 -284q193 -152 401 -317q6 -5 35 -29.5t46 -37.5t44.5 -31.5t50.5 -27.5t43 -9h1h1q20 0 43 9t50.5 27.5t44.5 31.5t46 37.5t35 29.5q208 165 401 317q54 43 100.5 115.5t46.5 131.5z M1792 1120v-1088q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113v1088q0 66 47 113t113 47h1472q66 0 113 -47t47 -113z" />
+<glyph unicode="&#xf004;" horiz-adv-x="1792" d="M896 -128q-26 0 -44 18l-624 602q-10 8 -27.5 26t-55.5 65.5t-68 97.5t-53.5 121t-23.5 138q0 220 127 344t351 124q62 0 126.5 -21.5t120 -58t95.5 -68.5t76 -68q36 36 76 68t95.5 68.5t120 58t126.5 21.5q224 0 351 -124t127 -344q0 -221 -229 -450l-623 -600 q-18 -18 -44 -18z" />
+<glyph unicode="&#xf005;" horiz-adv-x="1664" d="M1664 889q0 -22 -26 -48l-363 -354l86 -500q1 -7 1 -20q0 -21 -10.5 -35.5t-30.5 -14.5q-19 0 -40 12l-449 236l-449 -236q-22 -12 -40 -12q-21 0 -31.5 14.5t-10.5 35.5q0 6 2 20l86 500l-364 354q-25 27 -25 48q0 37 56 46l502 73l225 455q19 41 49 41t49 -41l225 -455 l502 -73q56 -9 56 -46z" />
+<glyph unicode="&#xf006;" horiz-adv-x="1664" d="M1137 532l306 297l-422 62l-189 382l-189 -382l-422 -62l306 -297l-73 -421l378 199l377 -199zM1664 889q0 -22 -26 -48l-363 -354l86 -500q1 -7 1 -20q0 -50 -41 -50q-19 0 -40 12l-449 236l-449 -236q-22 -12 -40 -12q-21 0 -31.5 14.5t-10.5 35.5q0 6 2 20l86 500 l-364 354q-25 27 -25 48q0 37 56 46l502 73l225 455q19 41 49 41t49 -41l225 -455l502 -73q56 -9 56 -46z" />
+<glyph unicode="&#xf007;" horiz-adv-x="1408" d="M1408 131q0 -120 -73 -189.5t-194 -69.5h-874q-121 0 -194 69.5t-73 189.5q0 53 3.5 103.5t14 109t26.5 108.5t43 97.5t62 81t85.5 53.5t111.5 20q9 0 42 -21.5t74.5 -48t108 -48t133.5 -21.5t133.5 21.5t108 48t74.5 48t42 21.5q61 0 111.5 -20t85.5 -53.5t62 -81 t43 -97.5t26.5 -108.5t14 -109t3.5 -103.5zM1088 1024q0 -159 -112.5 -271.5t-271.5 -112.5t-271.5 112.5t-112.5 271.5t112.5 271.5t271.5 112.5t271.5 -112.5t112.5 -271.5z" />
+<glyph unicode="&#xf008;" horiz-adv-x="1920" d="M384 -64v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45zM384 320v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45zM384 704v128q0 26 -19 45t-45 19h-128 q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45zM1408 -64v512q0 26 -19 45t-45 19h-768q-26 0 -45 -19t-19 -45v-512q0 -26 19 -45t45 -19h768q26 0 45 19t19 45zM384 1088v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45 t45 -19h128q26 0 45 19t19 45zM1792 -64v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45zM1408 704v512q0 26 -19 45t-45 19h-768q-26 0 -45 -19t-19 -45v-512q0 -26 19 -45t45 -19h768q26 0 45 19t19 45zM1792 320v128 q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45zM1792 704v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45zM1792 1088v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19 t-19 -45v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45zM1920 1248v-1344q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113v1344q0 66 47 113t113 47h1600q66 0 113 -47t47 -113z" />
+<glyph unicode="&#xf009;" horiz-adv-x="1664" d="M768 512v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90zM768 1280v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90zM1664 512v-384q0 -52 -38 -90t-90 -38 h-512q-52 0 -90 38t-38 90v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90zM1664 1280v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90z" />
+<glyph unicode="&#xf00a;" horiz-adv-x="1792" d="M512 288v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM512 800v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM1152 288v-192q0 -40 -28 -68t-68 -28h-320 q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM512 1312v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM1152 800v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28 h320q40 0 68 -28t28 -68zM1792 288v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM1152 1312v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM1792 800v-192 q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM1792 1312v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68z" />
+<glyph unicode="&#xf00b;" horiz-adv-x="1792" d="M512 288v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM512 800v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM1792 288v-192q0 -40 -28 -68t-68 -28h-960 q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h960q40 0 68 -28t28 -68zM512 1312v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM1792 800v-192q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v192q0 40 28 68t68 28 h960q40 0 68 -28t28 -68zM1792 1312v-192q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h960q40 0 68 -28t28 -68z" />
+<glyph unicode="&#xf00c;" horiz-adv-x="1792" d="M1671 970q0 -40 -28 -68l-724 -724l-136 -136q-28 -28 -68 -28t-68 28l-136 136l-362 362q-28 28 -28 68t28 68l136 136q28 28 68 28t68 -28l294 -295l656 657q28 28 68 28t68 -28l136 -136q28 -28 28 -68z" />
+<glyph unicode="&#xf00d;" horiz-adv-x="1408" d="M1298 214q0 -40 -28 -68l-136 -136q-28 -28 -68 -28t-68 28l-294 294l-294 -294q-28 -28 -68 -28t-68 28l-136 136q-28 28 -28 68t28 68l294 294l-294 294q-28 28 -28 68t28 68l136 136q28 28 68 28t68 -28l294 -294l294 294q28 28 68 28t68 -28l136 -136q28 -28 28 -68 t-28 -68l-294 -294l294 -294q28 -28 28 -68z" />
+<glyph unicode="&#xf00e;" horiz-adv-x="1664" d="M1024 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-224v-224q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v224h-224q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h224v224q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-224h224 q13 0 22.5 -9.5t9.5 -22.5zM1152 704q0 185 -131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5t131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5zM1664 -128q0 -53 -37.5 -90.5t-90.5 -37.5q-54 0 -90 38l-343 342q-179 -124 -399 -124q-143 0 -273.5 55.5 t-225 150t-150 225t-55.5 273.5t55.5 273.5t150 225t225 150t273.5 55.5t273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -220 -124 -399l343 -343q37 -37 37 -90z" />
+<glyph unicode="&#xf010;" horiz-adv-x="1664" d="M1024 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-576q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h576q13 0 22.5 -9.5t9.5 -22.5zM1152 704q0 185 -131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5t131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5z M1664 -128q0 -53 -37.5 -90.5t-90.5 -37.5q-54 0 -90 38l-343 342q-179 -124 -399 -124q-143 0 -273.5 55.5t-225 150t-150 225t-55.5 273.5t55.5 273.5t150 225t225 150t273.5 55.5t273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -220 -124 -399l343 -343q37 -37 37 -90z " />
+<glyph unicode="&#xf011;" d="M1536 640q0 -156 -61 -298t-164 -245t-245 -164t-298 -61t-298 61t-245 164t-164 245t-61 298q0 182 80.5 343t226.5 270q43 32 95.5 25t83.5 -50q32 -42 24.5 -94.5t-49.5 -84.5q-98 -74 -151.5 -181t-53.5 -228q0 -104 40.5 -198.5t109.5 -163.5t163.5 -109.5 t198.5 -40.5t198.5 40.5t163.5 109.5t109.5 163.5t40.5 198.5q0 121 -53.5 228t-151.5 181q-42 32 -49.5 84.5t24.5 94.5q31 43 84 50t95 -25q146 -109 226.5 -270t80.5 -343zM896 1408v-640q0 -52 -38 -90t-90 -38t-90 38t-38 90v640q0 52 38 90t90 38t90 -38t38 -90z" />
+<glyph unicode="&#xf012;" horiz-adv-x="1792" d="M256 96v-192q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM640 224v-320q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v320q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM1024 480v-576q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23 v576q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM1408 864v-960q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v960q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM1792 1376v-1472q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v1472q0 14 9 23t23 9h192q14 0 23 -9t9 -23z" />
+<glyph unicode="&#xf013;" d="M1024 640q0 106 -75 181t-181 75t-181 -75t-75 -181t75 -181t181 -75t181 75t75 181zM1536 749v-222q0 -12 -8 -23t-20 -13l-185 -28q-19 -54 -39 -91q35 -50 107 -138q10 -12 10 -25t-9 -23q-27 -37 -99 -108t-94 -71q-12 0 -26 9l-138 108q-44 -23 -91 -38 q-16 -136 -29 -186q-7 -28 -36 -28h-222q-14 0 -24.5 8.5t-11.5 21.5l-28 184q-49 16 -90 37l-141 -107q-10 -9 -25 -9q-14 0 -25 11q-126 114 -165 168q-7 10 -7 23q0 12 8 23q15 21 51 66.5t54 70.5q-27 50 -41 99l-183 27q-13 2 -21 12.5t-8 23.5v222q0 12 8 23t19 13 l186 28q14 46 39 92q-40 57 -107 138q-10 12 -10 24q0 10 9 23q26 36 98.5 107.5t94.5 71.5q13 0 26 -10l138 -107q44 23 91 38q16 136 29 186q7 28 36 28h222q14 0 24.5 -8.5t11.5 -21.5l28 -184q49 -16 90 -37l142 107q9 9 24 9q13 0 25 -10q129 -119 165 -170q7 -8 7 -22 q0 -12 -8 -23q-15 -21 -51 -66.5t-54 -70.5q26 -50 41 -98l183 -28q13 -2 21 -12.5t8 -23.5z" />
+<glyph unicode="&#xf014;" horiz-adv-x="1408" d="M512 800v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v576q0 14 9 23t23 9h64q14 0 23 -9t9 -23zM768 800v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v576q0 14 9 23t23 9h64q14 0 23 -9t9 -23zM1024 800v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v576 q0 14 9 23t23 9h64q14 0 23 -9t9 -23zM1152 76v948h-896v-948q0 -22 7 -40.5t14.5 -27t10.5 -8.5h832q3 0 10.5 8.5t14.5 27t7 40.5zM480 1152h448l-48 117q-7 9 -17 11h-317q-10 -2 -17 -11zM1408 1120v-64q0 -14 -9 -23t-23 -9h-96v-948q0 -83 -47 -143.5t-113 -60.5h-832 q-66 0 -113 58.5t-47 141.5v952h-96q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h309l70 167q15 37 54 63t79 26h320q40 0 79 -26t54 -63l70 -167h309q14 0 23 -9t9 -23z" />
+<glyph unicode="&#xf015;" horiz-adv-x="1664" d="M1408 544v-480q0 -26 -19 -45t-45 -19h-384v384h-256v-384h-384q-26 0 -45 19t-19 45v480q0 1 0.5 3t0.5 3l575 474l575 -474q1 -2 1 -6zM1631 613l-62 -74q-8 -9 -21 -11h-3q-13 0 -21 7l-692 577l-692 -577q-12 -8 -24 -7q-13 2 -21 11l-62 74q-8 10 -7 23.5t11 21.5 l719 599q32 26 76 26t76 -26l244 -204v195q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-408l219 -182q10 -8 11 -21.5t-7 -23.5z" />
+<glyph unicode="&#xf016;" horiz-adv-x="1280" d="M128 0h1024v768h-416q-40 0 -68 28t-28 68v416h-512v-1280zM768 896h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376zM1280 864v-896q0 -40 -28 -68t-68 -28h-1088q-40 0 -68 28t-28 68v1344q0 40 28 68t68 28h640q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88z " />
+<glyph unicode="&#xf017;" d="M896 992v-448q0 -14 -9 -23t-23 -9h-320q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h224v352q0 14 9 23t23 9h64q14 0 23 -9t9 -23zM1312 640q0 148 -73 273t-198 198t-273 73t-273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273zM1536 640 q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf018;" horiz-adv-x="1920" d="M1111 540v4l-24 320q-1 13 -11 22.5t-23 9.5h-186q-13 0 -23 -9.5t-11 -22.5l-24 -320v-4q-1 -12 8 -20t21 -8h244q12 0 21 8t8 20zM1870 73q0 -73 -46 -73h-704q13 0 22 9.5t8 22.5l-20 256q-1 13 -11 22.5t-23 9.5h-272q-13 0 -23 -9.5t-11 -22.5l-20 -256 q-1 -13 8 -22.5t22 -9.5h-704q-46 0 -46 73q0 54 26 116l417 1044q8 19 26 33t38 14h339q-13 0 -23 -9.5t-11 -22.5l-15 -192q-1 -14 8 -23t22 -9h166q13 0 22 9t8 23l-15 192q-1 13 -11 22.5t-23 9.5h339q20 0 38 -14t26 -33l417 -1044q26 -62 26 -116z" />
+<glyph unicode="&#xf019;" horiz-adv-x="1664" d="M1280 192q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1536 192q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1664 416v-320q0 -40 -28 -68t-68 -28h-1472q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h465l135 -136 q58 -56 136 -56t136 56l136 136h464q40 0 68 -28t28 -68zM1339 985q17 -41 -14 -70l-448 -448q-18 -19 -45 -19t-45 19l-448 448q-31 29 -14 70q17 39 59 39h256v448q0 26 19 45t45 19h256q26 0 45 -19t19 -45v-448h256q42 0 59 -39z" />
+<glyph unicode="&#xf01a;" d="M1120 608q0 -12 -10 -24l-319 -319q-11 -9 -23 -9t-23 9l-320 320q-15 16 -7 35q8 20 30 20h192v352q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-352h192q14 0 23 -9t9 -23zM768 1184q-148 0 -273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273 t-73 273t-198 198t-273 73zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf01b;" d="M1118 660q-8 -20 -30 -20h-192v-352q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v352h-192q-14 0 -23 9t-9 23q0 12 10 24l319 319q11 9 23 9t23 -9l320 -320q15 -16 7 -35zM768 1184q-148 0 -273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73t273 73t198 198 t73 273t-73 273t-198 198t-273 73zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf01c;" d="M1023 576h316q-1 3 -2.5 8t-2.5 8l-212 496h-708l-212 -496q-1 -2 -2.5 -8t-2.5 -8h316l95 -192h320zM1536 546v-482q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45v482q0 62 25 123l238 552q10 25 36.5 42t52.5 17h832q26 0 52.5 -17t36.5 -42l238 -552 q25 -61 25 -123z" />
+<glyph unicode="&#xf01d;" d="M1184 640q0 -37 -32 -55l-544 -320q-15 -9 -32 -9q-16 0 -32 8q-32 19 -32 56v640q0 37 32 56q33 18 64 -1l544 -320q32 -18 32 -55zM1312 640q0 148 -73 273t-198 198t-273 73t-273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273zM1536 640 q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf01e;" d="M1536 1280v-448q0 -26 -19 -45t-45 -19h-448q-42 0 -59 40q-17 39 14 69l138 138q-148 137 -349 137q-104 0 -198.5 -40.5t-163.5 -109.5t-109.5 -163.5t-40.5 -198.5t40.5 -198.5t109.5 -163.5t163.5 -109.5t198.5 -40.5q119 0 225 52t179 147q7 10 23 12q14 0 25 -9 l137 -138q9 -8 9.5 -20.5t-7.5 -22.5q-109 -132 -264 -204.5t-327 -72.5q-156 0 -298 61t-245 164t-164 245t-61 298t61 298t164 245t245 164t298 61q147 0 284.5 -55.5t244.5 -156.5l130 129q29 31 70 14q39 -17 39 -59z" />
+<glyph unicode="&#xf021;" d="M1511 480q0 -5 -1 -7q-64 -268 -268 -434.5t-478 -166.5q-146 0 -282.5 55t-243.5 157l-129 -129q-19 -19 -45 -19t-45 19t-19 45v448q0 26 19 45t45 19h448q26 0 45 -19t19 -45t-19 -45l-137 -137q71 -66 161 -102t187 -36q134 0 250 65t186 179q11 17 53 117 q8 23 30 23h192q13 0 22.5 -9.5t9.5 -22.5zM1536 1280v-448q0 -26 -19 -45t-45 -19h-448q-26 0 -45 19t-19 45t19 45l138 138q-148 137 -349 137q-134 0 -250 -65t-186 -179q-11 -17 -53 -117q-8 -23 -30 -23h-199q-13 0 -22.5 9.5t-9.5 22.5v7q65 268 270 434.5t480 166.5 q146 0 284 -55.5t245 -156.5l130 129q19 19 45 19t45 -19t19 -45z" />
+<glyph unicode="&#xf022;" horiz-adv-x="1792" d="M384 352v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM384 608v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z M384 864v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM1536 352v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h960q13 0 22.5 -9.5t9.5 -22.5z M1536 608v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h960q13 0 22.5 -9.5t9.5 -22.5zM1536 864v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h960q13 0 22.5 -9.5 t9.5 -22.5zM1664 160v832q0 13 -9.5 22.5t-22.5 9.5h-1472q-13 0 -22.5 -9.5t-9.5 -22.5v-832q0 -13 9.5 -22.5t22.5 -9.5h1472q13 0 22.5 9.5t9.5 22.5zM1792 1248v-1088q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113v1088q0 66 47 113t113 47h1472q66 0 113 -47 t47 -113z" />
+<glyph unicode="&#xf023;" horiz-adv-x="1152" d="M320 768h512v192q0 106 -75 181t-181 75t-181 -75t-75 -181v-192zM1152 672v-576q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v576q0 40 28 68t68 28h32v192q0 184 132 316t316 132t316 -132t132 -316v-192h32q40 0 68 -28t28 -68z" />
+<glyph unicode="&#xf024;" horiz-adv-x="1792" d="M320 1280q0 -72 -64 -110v-1266q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v1266q-64 38 -64 110q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1792 1216v-763q0 -25 -12.5 -38.5t-39.5 -27.5q-215 -116 -369 -116q-61 0 -123.5 22t-108.5 48 t-115.5 48t-142.5 22q-192 0 -464 -146q-17 -9 -33 -9q-26 0 -45 19t-19 45v742q0 32 31 55q21 14 79 43q236 120 421 120q107 0 200 -29t219 -88q38 -19 88 -19q54 0 117.5 21t110 47t88 47t54.5 21q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf025;" horiz-adv-x="1664" d="M1664 650q0 -166 -60 -314l-20 -49l-185 -33q-22 -83 -90.5 -136.5t-156.5 -53.5v-32q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v576q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-32q71 0 130 -35.5t93 -95.5l68 12q29 95 29 193q0 148 -88 279t-236.5 209t-315.5 78 t-315.5 -78t-236.5 -209t-88 -279q0 -98 29 -193l68 -12q34 60 93 95.5t130 35.5v32q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v32q-88 0 -156.5 53.5t-90.5 136.5l-185 33l-20 49q-60 148 -60 314q0 151 67 291t179 242.5 t266 163.5t320 61t320 -61t266 -163.5t179 -242.5t67 -291z" />
+<glyph unicode="&#xf026;" horiz-adv-x="768" d="M768 1184v-1088q0 -26 -19 -45t-45 -19t-45 19l-333 333h-262q-26 0 -45 19t-19 45v384q0 26 19 45t45 19h262l333 333q19 19 45 19t45 -19t19 -45z" />
+<glyph unicode="&#xf027;" horiz-adv-x="1152" d="M768 1184v-1088q0 -26 -19 -45t-45 -19t-45 19l-333 333h-262q-26 0 -45 19t-19 45v384q0 26 19 45t45 19h262l333 333q19 19 45 19t45 -19t19 -45zM1152 640q0 -76 -42.5 -141.5t-112.5 -93.5q-10 -5 -25 -5q-26 0 -45 18.5t-19 45.5q0 21 12 35.5t29 25t34 23t29 35.5 t12 57t-12 57t-29 35.5t-34 23t-29 25t-12 35.5q0 27 19 45.5t45 18.5q15 0 25 -5q70 -27 112.5 -93t42.5 -142z" />
+<glyph unicode="&#xf028;" horiz-adv-x="1664" d="M768 1184v-1088q0 -26 -19 -45t-45 -19t-45 19l-333 333h-262q-26 0 -45 19t-19 45v384q0 26 19 45t45 19h262l333 333q19 19 45 19t45 -19t19 -45zM1152 640q0 -76 -42.5 -141.5t-112.5 -93.5q-10 -5 -25 -5q-26 0 -45 18.5t-19 45.5q0 21 12 35.5t29 25t34 23t29 35.5 t12 57t-12 57t-29 35.5t-34 23t-29 25t-12 35.5q0 27 19 45.5t45 18.5q15 0 25 -5q70 -27 112.5 -93t42.5 -142zM1408 640q0 -153 -85 -282.5t-225 -188.5q-13 -5 -25 -5q-27 0 -46 19t-19 45q0 39 39 59q56 29 76 44q74 54 115.5 135.5t41.5 173.5t-41.5 173.5 t-115.5 135.5q-20 15 -76 44q-39 20 -39 59q0 26 19 45t45 19q13 0 26 -5q140 -59 225 -188.5t85 -282.5zM1664 640q0 -230 -127 -422.5t-338 -283.5q-13 -5 -26 -5q-26 0 -45 19t-19 45q0 36 39 59q7 4 22.5 10.5t22.5 10.5q46 25 82 51q123 91 192 227t69 289t-69 289 t-192 227q-36 26 -82 51q-7 4 -22.5 10.5t-22.5 10.5q-39 23 -39 59q0 26 19 45t45 19q13 0 26 -5q211 -91 338 -283.5t127 -422.5z" />
+<glyph unicode="&#xf029;" horiz-adv-x="1408" d="M384 384v-128h-128v128h128zM384 1152v-128h-128v128h128zM1152 1152v-128h-128v128h128zM128 129h384v383h-384v-383zM128 896h384v384h-384v-384zM896 896h384v384h-384v-384zM640 640v-640h-640v640h640zM1152 128v-128h-128v128h128zM1408 128v-128h-128v128h128z M1408 640v-384h-384v128h-128v-384h-128v640h384v-128h128v128h128zM640 1408v-640h-640v640h640zM1408 1408v-640h-640v640h640z" />
+<glyph unicode="&#xf02a;" horiz-adv-x="1792" d="M63 0h-63v1408h63v-1408zM126 1h-32v1407h32v-1407zM220 1h-31v1407h31v-1407zM377 1h-31v1407h31v-1407zM534 1h-62v1407h62v-1407zM660 1h-31v1407h31v-1407zM723 1h-31v1407h31v-1407zM786 1h-31v1407h31v-1407zM943 1h-63v1407h63v-1407zM1100 1h-63v1407h63v-1407z M1226 1h-63v1407h63v-1407zM1352 1h-63v1407h63v-1407zM1446 1h-63v1407h63v-1407zM1635 1h-94v1407h94v-1407zM1698 1h-32v1407h32v-1407zM1792 0h-63v1408h63v-1408z" />
+<glyph unicode="&#xf02b;" d="M448 1088q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1515 512q0 -53 -37 -90l-491 -492q-39 -37 -91 -37q-53 0 -90 37l-715 716q-38 37 -64.5 101t-26.5 117v416q0 52 38 90t90 38h416q53 0 117 -26.5t102 -64.5 l715 -714q37 -39 37 -91z" />
+<glyph unicode="&#xf02c;" horiz-adv-x="1920" d="M448 1088q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1515 512q0 -53 -37 -90l-491 -492q-39 -37 -91 -37q-53 0 -90 37l-715 716q-38 37 -64.5 101t-26.5 117v416q0 52 38 90t90 38h416q53 0 117 -26.5t102 -64.5 l715 -714q37 -39 37 -91zM1899 512q0 -53 -37 -90l-491 -492q-39 -37 -91 -37q-36 0 -59 14t-53 45l470 470q37 37 37 90q0 52 -37 91l-715 714q-38 38 -102 64.5t-117 26.5h224q53 0 117 -26.5t102 -64.5l715 -714q37 -39 37 -91z" />
+<glyph unicode="&#xf02d;" horiz-adv-x="1664" d="M1639 1058q40 -57 18 -129l-275 -906q-19 -64 -76.5 -107.5t-122.5 -43.5h-923q-77 0 -148.5 53.5t-99.5 131.5q-24 67 -2 127q0 4 3 27t4 37q1 8 -3 21.5t-3 19.5q2 11 8 21t16.5 23.5t16.5 23.5q23 38 45 91.5t30 91.5q3 10 0.5 30t-0.5 28q3 11 17 28t17 23 q21 36 42 92t25 90q1 9 -2.5 32t0.5 28q4 13 22 30.5t22 22.5q19 26 42.5 84.5t27.5 96.5q1 8 -3 25.5t-2 26.5q2 8 9 18t18 23t17 21q8 12 16.5 30.5t15 35t16 36t19.5 32t26.5 23.5t36 11.5t47.5 -5.5l-1 -3q38 9 51 9h761q74 0 114 -56t18 -130l-274 -906 q-36 -119 -71.5 -153.5t-128.5 -34.5h-869q-27 0 -38 -15q-11 -16 -1 -43q24 -70 144 -70h923q29 0 56 15.5t35 41.5l300 987q7 22 5 57q38 -15 59 -43zM575 1056q-4 -13 2 -22.5t20 -9.5h608q13 0 25.5 9.5t16.5 22.5l21 64q4 13 -2 22.5t-20 9.5h-608q-13 0 -25.5 -9.5 t-16.5 -22.5zM492 800q-4 -13 2 -22.5t20 -9.5h608q13 0 25.5 9.5t16.5 22.5l21 64q4 13 -2 22.5t-20 9.5h-608q-13 0 -25.5 -9.5t-16.5 -22.5z" />
+<glyph unicode="&#xf02e;" horiz-adv-x="1280" d="M1164 1408q23 0 44 -9q33 -13 52.5 -41t19.5 -62v-1289q0 -34 -19.5 -62t-52.5 -41q-19 -8 -44 -8q-48 0 -83 32l-441 424l-441 -424q-36 -33 -83 -33q-23 0 -44 9q-33 13 -52.5 41t-19.5 62v1289q0 34 19.5 62t52.5 41q21 9 44 9h1048z" />
+<glyph unicode="&#xf02f;" horiz-adv-x="1664" d="M384 0h896v256h-896v-256zM384 640h896v384h-160q-40 0 -68 28t-28 68v160h-640v-640zM1536 576q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1664 576v-416q0 -13 -9.5 -22.5t-22.5 -9.5h-224v-160q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68 v160h-224q-13 0 -22.5 9.5t-9.5 22.5v416q0 79 56.5 135.5t135.5 56.5h64v544q0 40 28 68t68 28h672q40 0 88 -20t76 -48l152 -152q28 -28 48 -76t20 -88v-256h64q79 0 135.5 -56.5t56.5 -135.5z" />
+<glyph unicode="&#xf030;" horiz-adv-x="1920" d="M960 864q119 0 203.5 -84.5t84.5 -203.5t-84.5 -203.5t-203.5 -84.5t-203.5 84.5t-84.5 203.5t84.5 203.5t203.5 84.5zM1664 1280q106 0 181 -75t75 -181v-896q0 -106 -75 -181t-181 -75h-1408q-106 0 -181 75t-75 181v896q0 106 75 181t181 75h224l51 136 q19 49 69.5 84.5t103.5 35.5h512q53 0 103.5 -35.5t69.5 -84.5l51 -136h224zM960 128q185 0 316.5 131.5t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5t131.5 -316.5t316.5 -131.5z" />
+<glyph unicode="&#xf031;" horiz-adv-x="1664" d="M725 977l-170 -450q73 -1 153.5 -2t119 -1.5t52.5 -0.5l29 2q-32 95 -92 241q-53 132 -92 211zM21 -128h-21l2 79q22 7 80 18q89 16 110 31q20 16 48 68l237 616l280 724h75h53l11 -21l205 -480q103 -242 124 -297q39 -102 96 -235q26 -58 65 -164q24 -67 65 -149 q22 -49 35 -57q22 -19 69 -23q47 -6 103 -27q6 -39 6 -57q0 -14 -1 -26q-80 0 -192 8q-93 8 -189 8q-79 0 -135 -2l-200 -11l-58 -2q0 45 4 78l131 28q56 13 68 23q12 12 12 27t-6 32l-47 114l-92 228l-450 2q-29 -65 -104 -274q-23 -64 -23 -84q0 -31 17 -43 q26 -21 103 -32q3 0 13.5 -2t30 -5t40.5 -6q1 -28 1 -58q0 -17 -2 -27q-66 0 -349 20l-48 -8q-81 -14 -167 -14z" />
+<glyph unicode="&#xf032;" horiz-adv-x="1408" d="M555 15q76 -32 140 -32q131 0 216 41t122 113q38 70 38 181q0 114 -41 180q-58 94 -141 126q-80 32 -247 32q-74 0 -101 -10v-144l-1 -173l3 -270q0 -15 12 -44zM541 761q43 -7 109 -7q175 0 264 65t89 224q0 112 -85 187q-84 75 -255 75q-52 0 -130 -13q0 -44 2 -77 q7 -122 6 -279l-1 -98q0 -43 1 -77zM0 -128l2 94q45 9 68 12q77 12 123 31q17 27 21 51q9 66 9 194l-2 497q-5 256 -9 404q-1 87 -11 109q-1 4 -12 12q-18 12 -69 15q-30 2 -114 13l-4 83l260 6l380 13l45 1q5 0 14 0.5t14 0.5q1 0 21.5 -0.5t40.5 -0.5h74q88 0 191 -27 q43 -13 96 -39q57 -29 102 -76q44 -47 65 -104t21 -122q0 -70 -32 -128t-95 -105q-26 -20 -150 -77q177 -41 267 -146q92 -106 92 -236q0 -76 -29 -161q-21 -62 -71 -117q-66 -72 -140 -108q-73 -36 -203 -60q-82 -15 -198 -11l-197 4q-84 2 -298 -11q-33 -3 -272 -11z" />
+<glyph unicode="&#xf033;" horiz-adv-x="1024" d="M0 -126l17 85q4 1 77 20q76 19 116 39q29 37 41 101l27 139l56 268l12 64q8 44 17 84.5t16 67t12.5 46.5t9 30.5t3.5 11.5l29 157l16 63l22 135l8 50v38q-41 22 -144 28q-28 2 -38 4l19 103l317 -14q39 -2 73 -2q66 0 214 9q33 2 68 4.5t36 2.5q-2 -19 -6 -38 q-7 -29 -13 -51q-55 -19 -109 -31q-64 -16 -101 -31q-12 -31 -24 -88q-9 -44 -13 -82q-44 -199 -66 -306l-61 -311l-38 -158l-43 -235l-12 -45q-2 -7 1 -27q64 -15 119 -21q36 -5 66 -10q-1 -29 -7 -58q-7 -31 -9 -41q-18 0 -23 -1q-24 -2 -42 -2q-9 0 -28 3q-19 4 -145 17 l-198 2q-41 1 -174 -11q-74 -7 -98 -9z" />
+<glyph unicode="&#xf034;" horiz-adv-x="1792" d="M81 1407l54 -27q20 -5 211 -5h130l19 3l115 1l215 -1h293l34 -2q14 -1 28 7t21 16l7 8l42 1q15 0 28 -1v-104.5t1 -131.5l1 -100l-1 -58q0 -32 -4 -51q-39 -15 -68 -18q-25 43 -54 128q-8 24 -15.5 62.5t-11.5 65.5t-6 29q-13 15 -27 19q-7 2 -42.5 2t-103.5 -1t-111 -1 q-34 0 -67 -5q-10 -97 -8 -136l1 -152v-332l3 -359l-1 -147q-1 -46 11 -85q49 -25 89 -32q2 0 18 -5t44 -13t43 -12q30 -8 50 -18q5 -45 5 -50q0 -10 -3 -29q-14 -1 -34 -1q-110 0 -187 10q-72 8 -238 8q-88 0 -233 -14q-48 -4 -70 -4q-2 22 -2 26l-1 26v9q21 33 79 49 q139 38 159 50q9 21 12 56q8 192 6 433l-5 428q-1 62 -0.5 118.5t0.5 102.5t-2 57t-6 15q-6 5 -14 6q-38 6 -148 6q-43 0 -100 -13.5t-73 -24.5q-13 -9 -22 -33t-22 -75t-24 -84q-6 -19 -19.5 -32t-20.5 -13q-44 27 -56 44v297v86zM1744 128q33 0 42 -18.5t-11 -44.5 l-126 -162q-20 -26 -49 -26t-49 26l-126 162q-20 26 -11 44.5t42 18.5h80v1024h-80q-33 0 -42 18.5t11 44.5l126 162q20 26 49 26t49 -26l126 -162q20 -26 11 -44.5t-42 -18.5h-80v-1024h80z" />
+<glyph unicode="&#xf035;" d="M81 1407l54 -27q20 -5 211 -5h130l19 3l115 1l446 -1h318l34 -2q14 -1 28 7t21 16l7 8l42 1q15 0 28 -1v-104.5t1 -131.5l1 -100l-1 -58q0 -32 -4 -51q-39 -15 -68 -18q-25 43 -54 128q-8 24 -15.5 62.5t-11.5 65.5t-6 29q-13 15 -27 19q-7 2 -58.5 2t-138.5 -1t-128 -1 q-94 0 -127 -5q-10 -97 -8 -136l1 -152v52l3 -359l-1 -147q-1 -46 11 -85q49 -25 89 -32q2 0 18 -5t44 -13t43 -12q30 -8 50 -18q5 -45 5 -50q0 -10 -3 -29q-14 -1 -34 -1q-110 0 -187 10q-72 8 -238 8q-82 0 -233 -13q-45 -5 -70 -5q-2 22 -2 26l-1 26v9q21 33 79 49 q139 38 159 50q9 21 12 56q6 137 6 433l-5 44q0 265 -2 278q-2 11 -6 15q-6 5 -14 6q-38 6 -148 6q-50 0 -168.5 -14t-132.5 -24q-13 -9 -22 -33t-22 -75t-24 -84q-6 -19 -19.5 -32t-20.5 -13q-44 27 -56 44v297v86zM1505 113q26 -20 26 -49t-26 -49l-162 -126 q-26 -20 -44.5 -11t-18.5 42v80h-1024v-80q0 -33 -18.5 -42t-44.5 11l-162 126q-26 20 -26 49t26 49l162 126q26 20 44.5 11t18.5 -42v-80h1024v80q0 33 18.5 42t44.5 -11z" />
+<glyph unicode="&#xf036;" horiz-adv-x="1792" d="M1792 192v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45zM1408 576v-128q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1280q26 0 45 -19t19 -45zM1664 960v-128q0 -26 -19 -45 t-45 -19h-1536q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1536q26 0 45 -19t19 -45zM1280 1344v-128q0 -26 -19 -45t-45 -19h-1152q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1152q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf037;" horiz-adv-x="1792" d="M1792 192v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45zM1408 576v-128q0 -26 -19 -45t-45 -19h-896q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h896q26 0 45 -19t19 -45zM1664 960v-128q0 -26 -19 -45t-45 -19 h-1408q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45zM1280 1344v-128q0 -26 -19 -45t-45 -19h-640q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h640q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf038;" horiz-adv-x="1792" d="M1792 192v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45zM1792 576v-128q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1280q26 0 45 -19t19 -45zM1792 960v-128q0 -26 -19 -45 t-45 -19h-1536q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1536q26 0 45 -19t19 -45zM1792 1344v-128q0 -26 -19 -45t-45 -19h-1152q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1152q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf039;" horiz-adv-x="1792" d="M1792 192v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45zM1792 576v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45zM1792 960v-128q0 -26 -19 -45 t-45 -19h-1664q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45zM1792 1344v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf03a;" horiz-adv-x="1792" d="M256 224v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5zM256 608v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5 t9.5 -22.5zM256 992v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5zM1792 224v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1344 q13 0 22.5 -9.5t9.5 -22.5zM256 1376v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5zM1792 608v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5 t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5zM1792 992v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5zM1792 1376v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5v192 q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5z" />
+<glyph unicode="&#xf03b;" horiz-adv-x="1792" d="M384 992v-576q0 -13 -9.5 -22.5t-22.5 -9.5q-14 0 -23 9l-288 288q-9 9 -9 23t9 23l288 288q9 9 23 9q13 0 22.5 -9.5t9.5 -22.5zM1792 224v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5 t9.5 -22.5zM1792 608v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1088q13 0 22.5 -9.5t9.5 -22.5zM1792 992v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1088 q13 0 22.5 -9.5t9.5 -22.5zM1792 1376v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5t9.5 -22.5z" />
+<glyph unicode="&#xf03c;" horiz-adv-x="1792" d="M352 704q0 -14 -9 -23l-288 -288q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v576q0 13 9.5 22.5t22.5 9.5q14 0 23 -9l288 -288q9 -9 9 -23zM1792 224v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5 t9.5 -22.5zM1792 608v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1088q13 0 22.5 -9.5t9.5 -22.5zM1792 992v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1088 q13 0 22.5 -9.5t9.5 -22.5zM1792 1376v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5t9.5 -22.5z" />
+<glyph unicode="&#xf03d;" horiz-adv-x="1792" d="M1792 1184v-1088q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-403 403v-166q0 -119 -84.5 -203.5t-203.5 -84.5h-704q-119 0 -203.5 84.5t-84.5 203.5v704q0 119 84.5 203.5t203.5 84.5h704q119 0 203.5 -84.5t84.5 -203.5v-165l403 402q18 19 45 19q12 0 25 -5 q39 -17 39 -59z" />
+<glyph unicode="&#xf03e;" horiz-adv-x="1920" d="M640 960q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM1664 576v-448h-1408v192l320 320l160 -160l512 512zM1760 1280h-1600q-13 0 -22.5 -9.5t-9.5 -22.5v-1216q0 -13 9.5 -22.5t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5v1216 q0 13 -9.5 22.5t-22.5 9.5zM1920 1248v-1216q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113v1216q0 66 47 113t113 47h1600q66 0 113 -47t47 -113z" />
+<glyph unicode="&#xf040;" d="M363 0l91 91l-235 235l-91 -91v-107h128v-128h107zM886 928q0 22 -22 22q-10 0 -17 -7l-542 -542q-7 -7 -7 -17q0 -22 22 -22q10 0 17 7l542 542q7 7 7 17zM832 1120l416 -416l-832 -832h-416v416zM1515 1024q0 -53 -37 -90l-166 -166l-416 416l166 165q36 38 90 38 q53 0 91 -38l235 -234q37 -39 37 -91z" />
+<glyph unicode="&#xf041;" horiz-adv-x="1024" d="M768 896q0 106 -75 181t-181 75t-181 -75t-75 -181t75 -181t181 -75t181 75t75 181zM1024 896q0 -109 -33 -179l-364 -774q-16 -33 -47.5 -52t-67.5 -19t-67.5 19t-46.5 52l-365 774q-33 70 -33 179q0 212 150 362t362 150t362 -150t150 -362z" />
+<glyph unicode="&#xf042;" d="M768 96v1088q-148 0 -273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf043;" horiz-adv-x="1024" d="M512 384q0 36 -20 69q-1 1 -15.5 22.5t-25.5 38t-25 44t-21 50.5q-4 16 -21 16t-21 -16q-7 -23 -21 -50.5t-25 -44t-25.5 -38t-15.5 -22.5q-20 -33 -20 -69q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1024 512q0 -212 -150 -362t-362 -150t-362 150t-150 362 q0 145 81 275q6 9 62.5 90.5t101 151t99.5 178t83 201.5q9 30 34 47t51 17t51.5 -17t33.5 -47q28 -93 83 -201.5t99.5 -178t101 -151t62.5 -90.5q81 -127 81 -275z" />
+<glyph unicode="&#xf044;" horiz-adv-x="1792" d="M888 352l116 116l-152 152l-116 -116v-56h96v-96h56zM1328 1072q-16 16 -33 -1l-350 -350q-17 -17 -1 -33t33 1l350 350q17 17 1 33zM1408 478v-190q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5v832q0 119 84.5 203.5t203.5 84.5h832 q63 0 117 -25q15 -7 18 -23q3 -17 -9 -29l-49 -49q-14 -14 -32 -8q-23 6 -45 6h-832q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v126q0 13 9 22l64 64q15 15 35 7t20 -29zM1312 1216l288 -288l-672 -672h-288v288zM1756 1084l-92 -92 l-288 288l92 92q28 28 68 28t68 -28l152 -152q28 -28 28 -68t-28 -68z" />
+<glyph unicode="&#xf045;" horiz-adv-x="1664" d="M1408 547v-259q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5v832q0 119 84.5 203.5t203.5 84.5h255v0q13 0 22.5 -9.5t9.5 -22.5q0 -27 -26 -32q-77 -26 -133 -60q-10 -4 -16 -4h-112q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832 q66 0 113 47t47 113v214q0 19 18 29q28 13 54 37q16 16 35 8q21 -9 21 -29zM1645 1043l-384 -384q-18 -19 -45 -19q-12 0 -25 5q-39 17 -39 59v192h-160q-323 0 -438 -131q-119 -137 -74 -473q3 -23 -20 -34q-8 -2 -12 -2q-16 0 -26 13q-10 14 -21 31t-39.5 68.5t-49.5 99.5 t-38.5 114t-17.5 122q0 49 3.5 91t14 90t28 88t47 81.5t68.5 74t94.5 61.5t124.5 48.5t159.5 30.5t196.5 11h160v192q0 42 39 59q13 5 25 5q26 0 45 -19l384 -384q19 -19 19 -45t-19 -45z" />
+<glyph unicode="&#xf046;" horiz-adv-x="1664" d="M1408 606v-318q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5v832q0 119 84.5 203.5t203.5 84.5h832q63 0 117 -25q15 -7 18 -23q3 -17 -9 -29l-49 -49q-10 -10 -23 -10q-3 0 -9 2q-23 6 -45 6h-832q-66 0 -113 -47t-47 -113v-832 q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v254q0 13 9 22l64 64q10 10 23 10q6 0 12 -3q20 -8 20 -29zM1639 1095l-814 -814q-24 -24 -57 -24t-57 24l-430 430q-24 24 -24 57t24 57l110 110q24 24 57 24t57 -24l263 -263l647 647q24 24 57 24t57 -24l110 -110 q24 -24 24 -57t-24 -57z" />
+<glyph unicode="&#xf047;" horiz-adv-x="1792" d="M1792 640q0 -26 -19 -45l-256 -256q-19 -19 -45 -19t-45 19t-19 45v128h-384v-384h128q26 0 45 -19t19 -45t-19 -45l-256 -256q-19 -19 -45 -19t-45 19l-256 256q-19 19 -19 45t19 45t45 19h128v384h-384v-128q0 -26 -19 -45t-45 -19t-45 19l-256 256q-19 19 -19 45 t19 45l256 256q19 19 45 19t45 -19t19 -45v-128h384v384h-128q-26 0 -45 19t-19 45t19 45l256 256q19 19 45 19t45 -19l256 -256q19 -19 19 -45t-19 -45t-45 -19h-128v-384h384v128q0 26 19 45t45 19t45 -19l256 -256q19 -19 19 -45z" />
+<glyph unicode="&#xf048;" horiz-adv-x="1024" d="M979 1395q19 19 32 13t13 -32v-1472q0 -26 -13 -32t-32 13l-710 710q-9 9 -13 19v-678q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v1408q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-678q4 11 13 19z" />
+<glyph unicode="&#xf049;" horiz-adv-x="1792" d="M1747 1395q19 19 32 13t13 -32v-1472q0 -26 -13 -32t-32 13l-710 710q-9 9 -13 19v-710q0 -26 -13 -32t-32 13l-710 710q-9 9 -13 19v-678q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v1408q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-678q4 11 13 19l710 710 q19 19 32 13t13 -32v-710q4 11 13 19z" />
+<glyph unicode="&#xf04a;" horiz-adv-x="1664" d="M1619 1395q19 19 32 13t13 -32v-1472q0 -26 -13 -32t-32 13l-710 710q-8 9 -13 19v-710q0 -26 -13 -32t-32 13l-710 710q-19 19 -19 45t19 45l710 710q19 19 32 13t13 -32v-710q5 11 13 19z" />
+<glyph unicode="&#xf04b;" horiz-adv-x="1408" d="M1384 609l-1328 -738q-23 -13 -39.5 -3t-16.5 36v1472q0 26 16.5 36t39.5 -3l1328 -738q23 -13 23 -31t-23 -31z" />
+<glyph unicode="&#xf04c;" d="M1536 1344v-1408q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45v1408q0 26 19 45t45 19h512q26 0 45 -19t19 -45zM640 1344v-1408q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45v1408q0 26 19 45t45 19h512q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf04d;" d="M1536 1344v-1408q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45v1408q0 26 19 45t45 19h1408q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf04e;" horiz-adv-x="1664" d="M45 -115q-19 -19 -32 -13t-13 32v1472q0 26 13 32t32 -13l710 -710q8 -8 13 -19v710q0 26 13 32t32 -13l710 -710q19 -19 19 -45t-19 -45l-710 -710q-19 -19 -32 -13t-13 32v710q-5 -10 -13 -19z" />
+<glyph unicode="&#xf050;" horiz-adv-x="1792" d="M45 -115q-19 -19 -32 -13t-13 32v1472q0 26 13 32t32 -13l710 -710q8 -8 13 -19v710q0 26 13 32t32 -13l710 -710q8 -8 13 -19v678q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v678q-5 -10 -13 -19l-710 -710 q-19 -19 -32 -13t-13 32v710q-5 -10 -13 -19z" />
+<glyph unicode="&#xf051;" horiz-adv-x="1024" d="M45 -115q-19 -19 -32 -13t-13 32v1472q0 26 13 32t32 -13l710 -710q8 -8 13 -19v678q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v678q-5 -10 -13 -19z" />
+<glyph unicode="&#xf052;" horiz-adv-x="1538" d="M14 557l710 710q19 19 45 19t45 -19l710 -710q19 -19 13 -32t-32 -13h-1472q-26 0 -32 13t13 32zM1473 0h-1408q-26 0 -45 19t-19 45v256q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19z" />
+<glyph unicode="&#xf053;" horiz-adv-x="1152" d="M742 -37l-652 651q-37 37 -37 90.5t37 90.5l652 651q37 37 90.5 37t90.5 -37l75 -75q37 -37 37 -90.5t-37 -90.5l-486 -486l486 -485q37 -38 37 -91t-37 -90l-75 -75q-37 -37 -90.5 -37t-90.5 37z" />
+<glyph unicode="&#xf054;" horiz-adv-x="1152" d="M1099 704q0 -52 -37 -91l-652 -651q-37 -37 -90 -37t-90 37l-76 75q-37 39 -37 91q0 53 37 90l486 486l-486 485q-37 39 -37 91q0 53 37 90l76 75q36 38 90 38t90 -38l652 -651q37 -37 37 -90z" />
+<glyph unicode="&#xf055;" d="M1216 576v128q0 26 -19 45t-45 19h-256v256q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-256h-256q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h256v-256q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v256h256q26 0 45 19t19 45zM1536 640q0 -209 -103 -385.5 t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf056;" d="M1216 576v128q0 26 -19 45t-45 19h-768q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h768q26 0 45 19t19 45zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5 t103 -385.5z" />
+<glyph unicode="&#xf057;" d="M1149 414q0 26 -19 45l-181 181l181 181q19 19 19 45q0 27 -19 46l-90 90q-19 19 -46 19q-26 0 -45 -19l-181 -181l-181 181q-19 19 -45 19q-27 0 -46 -19l-90 -90q-19 -19 -19 -46q0 -26 19 -45l181 -181l-181 -181q-19 -19 -19 -45q0 -27 19 -46l90 -90q19 -19 46 -19 q26 0 45 19l181 181l181 -181q19 -19 45 -19q27 0 46 19l90 90q19 19 19 46zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf058;" d="M1284 802q0 28 -18 46l-91 90q-19 19 -45 19t-45 -19l-408 -407l-226 226q-19 19 -45 19t-45 -19l-91 -90q-18 -18 -18 -46q0 -27 18 -45l362 -362q19 -19 45 -19q27 0 46 19l543 543q18 18 18 45zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103 t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf059;" d="M896 160v192q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h192q14 0 23 9t9 23zM1152 832q0 88 -55.5 163t-138.5 116t-170 41q-243 0 -371 -213q-15 -24 8 -42l132 -100q7 -6 19 -6q16 0 25 12q53 68 86 92q34 24 86 24q48 0 85.5 -26t37.5 -59 q0 -38 -20 -61t-68 -45q-63 -28 -115.5 -86.5t-52.5 -125.5v-36q0 -14 9 -23t23 -9h192q14 0 23 9t9 23q0 19 21.5 49.5t54.5 49.5q32 18 49 28.5t46 35t44.5 48t28 60.5t12.5 81zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5 t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf05a;" d="M1024 160v160q0 14 -9 23t-23 9h-96v512q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-160q0 -14 9 -23t23 -9h96v-320h-96q-14 0 -23 -9t-9 -23v-160q0 -14 9 -23t23 -9h448q14 0 23 9t9 23zM896 1056v160q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-160q0 -14 9 -23 t23 -9h192q14 0 23 9t9 23zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf05b;" d="M1197 512h-109q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h109q-32 108 -112.5 188.5t-188.5 112.5v-109q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v109q-108 -32 -188.5 -112.5t-112.5 -188.5h109q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-109 q32 -108 112.5 -188.5t188.5 -112.5v109q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-109q108 32 188.5 112.5t112.5 188.5zM1536 704v-128q0 -26 -19 -45t-45 -19h-143q-37 -161 -154.5 -278.5t-278.5 -154.5v-143q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v143 q-161 37 -278.5 154.5t-154.5 278.5h-143q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h143q37 161 154.5 278.5t278.5 154.5v143q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-143q161 -37 278.5 -154.5t154.5 -278.5h143q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf05c;" d="M1097 457l-146 -146q-10 -10 -23 -10t-23 10l-137 137l-137 -137q-10 -10 -23 -10t-23 10l-146 146q-10 10 -10 23t10 23l137 137l-137 137q-10 10 -10 23t10 23l146 146q10 10 23 10t23 -10l137 -137l137 137q10 10 23 10t23 -10l146 -146q10 -10 10 -23t-10 -23 l-137 -137l137 -137q10 -10 10 -23t-10 -23zM1312 640q0 148 -73 273t-198 198t-273 73t-273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5 t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf05d;" d="M1171 723l-422 -422q-19 -19 -45 -19t-45 19l-294 294q-19 19 -19 45t19 45l102 102q19 19 45 19t45 -19l147 -147l275 275q19 19 45 19t45 -19l102 -102q19 -19 19 -45t-19 -45zM1312 640q0 148 -73 273t-198 198t-273 73t-273 -73t-198 -198t-73 -273t73 -273t198 -198 t273 -73t273 73t198 198t73 273zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf05e;" d="M1312 643q0 161 -87 295l-754 -753q137 -89 297 -89q111 0 211.5 43.5t173.5 116.5t116 174.5t43 212.5zM313 344l755 754q-135 91 -300 91q-148 0 -273 -73t-198 -199t-73 -274q0 -162 89 -299zM1536 643q0 -157 -61 -300t-163.5 -246t-245 -164t-298.5 -61t-298.5 61 t-245 164t-163.5 246t-61 300t61 299.5t163.5 245.5t245 164t298.5 61t298.5 -61t245 -164t163.5 -245.5t61 -299.5z" />
+<glyph unicode="&#xf060;" d="M1536 640v-128q0 -53 -32.5 -90.5t-84.5 -37.5h-704l293 -294q38 -36 38 -90t-38 -90l-75 -76q-37 -37 -90 -37q-52 0 -91 37l-651 652q-37 37 -37 90q0 52 37 91l651 650q38 38 91 38q52 0 90 -38l75 -74q38 -38 38 -91t-38 -91l-293 -293h704q52 0 84.5 -37.5 t32.5 -90.5z" />
+<glyph unicode="&#xf061;" d="M1472 576q0 -54 -37 -91l-651 -651q-39 -37 -91 -37q-51 0 -90 37l-75 75q-38 38 -38 91t38 91l293 293h-704q-52 0 -84.5 37.5t-32.5 90.5v128q0 53 32.5 90.5t84.5 37.5h704l-293 294q-38 36 -38 90t38 90l75 75q38 38 90 38q53 0 91 -38l651 -651q37 -35 37 -90z" />
+<glyph unicode="&#xf062;" horiz-adv-x="1664" d="M1611 565q0 -51 -37 -90l-75 -75q-38 -38 -91 -38q-54 0 -90 38l-294 293v-704q0 -52 -37.5 -84.5t-90.5 -32.5h-128q-53 0 -90.5 32.5t-37.5 84.5v704l-294 -293q-36 -38 -90 -38t-90 38l-75 75q-38 38 -38 90q0 53 38 91l651 651q35 37 90 37q54 0 91 -37l651 -651 q37 -39 37 -91z" />
+<glyph unicode="&#xf063;" horiz-adv-x="1664" d="M1611 704q0 -53 -37 -90l-651 -652q-39 -37 -91 -37q-53 0 -90 37l-651 652q-38 36 -38 90q0 53 38 91l74 75q39 37 91 37q53 0 90 -37l294 -294v704q0 52 38 90t90 38h128q52 0 90 -38t38 -90v-704l294 294q37 37 90 37q52 0 91 -37l75 -75q37 -39 37 -91z" />
+<glyph unicode="&#xf064;" horiz-adv-x="1792" d="M1792 896q0 -26 -19 -45l-512 -512q-19 -19 -45 -19t-45 19t-19 45v256h-224q-98 0 -175.5 -6t-154 -21.5t-133 -42.5t-105.5 -69.5t-80 -101t-48.5 -138.5t-17.5 -181q0 -55 5 -123q0 -6 2.5 -23.5t2.5 -26.5q0 -15 -8.5 -25t-23.5 -10q-16 0 -28 17q-7 9 -13 22 t-13.5 30t-10.5 24q-127 285 -127 451q0 199 53 333q162 403 875 403h224v256q0 26 19 45t45 19t45 -19l512 -512q19 -19 19 -45z" />
+<glyph unicode="&#xf065;" d="M755 480q0 -13 -10 -23l-332 -332l144 -144q19 -19 19 -45t-19 -45t-45 -19h-448q-26 0 -45 19t-19 45v448q0 26 19 45t45 19t45 -19l144 -144l332 332q10 10 23 10t23 -10l114 -114q10 -10 10 -23zM1536 1344v-448q0 -26 -19 -45t-45 -19t-45 19l-144 144l-332 -332 q-10 -10 -23 -10t-23 10l-114 114q-10 10 -10 23t10 23l332 332l-144 144q-19 19 -19 45t19 45t45 19h448q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf066;" d="M768 576v-448q0 -26 -19 -45t-45 -19t-45 19l-144 144l-332 -332q-10 -10 -23 -10t-23 10l-114 114q-10 10 -10 23t10 23l332 332l-144 144q-19 19 -19 45t19 45t45 19h448q26 0 45 -19t19 -45zM1523 1248q0 -13 -10 -23l-332 -332l144 -144q19 -19 19 -45t-19 -45 t-45 -19h-448q-26 0 -45 19t-19 45v448q0 26 19 45t45 19t45 -19l144 -144l332 332q10 10 23 10t23 -10l114 -114q10 -10 10 -23z" />
+<glyph unicode="&#xf067;" horiz-adv-x="1408" d="M1408 800v-192q0 -40 -28 -68t-68 -28h-416v-416q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68v416h-416q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h416v416q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-416h416q40 0 68 -28t28 -68z" />
+<glyph unicode="&#xf068;" horiz-adv-x="1408" d="M1408 800v-192q0 -40 -28 -68t-68 -28h-1216q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h1216q40 0 68 -28t28 -68z" />
+<glyph unicode="&#xf069;" horiz-adv-x="1664" d="M1482 486q46 -26 59.5 -77.5t-12.5 -97.5l-64 -110q-26 -46 -77.5 -59.5t-97.5 12.5l-266 153v-307q0 -52 -38 -90t-90 -38h-128q-52 0 -90 38t-38 90v307l-266 -153q-46 -26 -97.5 -12.5t-77.5 59.5l-64 110q-26 46 -12.5 97.5t59.5 77.5l266 154l-266 154 q-46 26 -59.5 77.5t12.5 97.5l64 110q26 46 77.5 59.5t97.5 -12.5l266 -153v307q0 52 38 90t90 38h128q52 0 90 -38t38 -90v-307l266 153q46 26 97.5 12.5t77.5 -59.5l64 -110q26 -46 12.5 -97.5t-59.5 -77.5l-266 -154z" />
+<glyph unicode="&#xf06a;" d="M768 1408q209 0 385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103zM896 161v190q0 14 -9 23.5t-22 9.5h-192q-13 0 -23 -10t-10 -23v-190q0 -13 10 -23t23 -10h192 q13 0 22 9.5t9 23.5zM894 505l18 621q0 12 -10 18q-10 8 -24 8h-220q-14 0 -24 -8q-10 -6 -10 -18l17 -621q0 -10 10 -17.5t24 -7.5h185q14 0 23.5 7.5t10.5 17.5z" />
+<glyph unicode="&#xf06b;" d="M928 180v56v468v192h-320v-192v-468v-56q0 -25 18 -38.5t46 -13.5h192q28 0 46 13.5t18 38.5zM472 1024h195l-126 161q-26 31 -69 31q-40 0 -68 -28t-28 -68t28 -68t68 -28zM1160 1120q0 40 -28 68t-68 28q-43 0 -69 -31l-125 -161h194q40 0 68 28t28 68zM1536 864v-320 q0 -14 -9 -23t-23 -9h-96v-416q0 -40 -28 -68t-68 -28h-1088q-40 0 -68 28t-28 68v416h-96q-14 0 -23 9t-9 23v320q0 14 9 23t23 9h440q-93 0 -158.5 65.5t-65.5 158.5t65.5 158.5t158.5 65.5q107 0 168 -77l128 -165l128 165q61 77 168 77q93 0 158.5 -65.5t65.5 -158.5 t-65.5 -158.5t-158.5 -65.5h440q14 0 23 -9t9 -23z" />
+<glyph unicode="&#xf06c;" horiz-adv-x="1792" d="M1280 832q0 26 -19 45t-45 19q-172 0 -318 -49.5t-259.5 -134t-235.5 -219.5q-19 -21 -19 -45q0 -26 19 -45t45 -19q24 0 45 19q27 24 74 71t67 66q137 124 268.5 176t313.5 52q26 0 45 19t19 45zM1792 1030q0 -95 -20 -193q-46 -224 -184.5 -383t-357.5 -268 q-214 -108 -438 -108q-148 0 -286 47q-15 5 -88 42t-96 37q-16 0 -39.5 -32t-45 -70t-52.5 -70t-60 -32q-30 0 -51 11t-31 24t-27 42q-2 4 -6 11t-5.5 10t-3 9.5t-1.5 13.5q0 35 31 73.5t68 65.5t68 56t31 48q0 4 -14 38t-16 44q-9 51 -9 104q0 115 43.5 220t119 184.5 t170.5 139t204 95.5q55 18 145 25.5t179.5 9t178.5 6t163.5 24t113.5 56.5l29.5 29.5t29.5 28t27 20t36.5 16t43.5 4.5q39 0 70.5 -46t47.5 -112t24 -124t8 -96z" />
+<glyph unicode="&#xf06d;" horiz-adv-x="1408" d="M1408 -160v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5zM1152 896q0 -78 -24.5 -144t-64 -112.5t-87.5 -88t-96 -77.5t-87.5 -72t-64 -81.5t-24.5 -96.5q0 -96 67 -224l-4 1l1 -1 q-90 41 -160 83t-138.5 100t-113.5 122.5t-72.5 150.5t-27.5 184q0 78 24.5 144t64 112.5t87.5 88t96 77.5t87.5 72t64 81.5t24.5 96.5q0 94 -66 224l3 -1l-1 1q90 -41 160 -83t138.5 -100t113.5 -122.5t72.5 -150.5t27.5 -184z" />
+<glyph unicode="&#xf06e;" horiz-adv-x="1792" d="M1664 576q-152 236 -381 353q61 -104 61 -225q0 -185 -131.5 -316.5t-316.5 -131.5t-316.5 131.5t-131.5 316.5q0 121 61 225q-229 -117 -381 -353q133 -205 333.5 -326.5t434.5 -121.5t434.5 121.5t333.5 326.5zM944 960q0 20 -14 34t-34 14q-125 0 -214.5 -89.5 t-89.5 -214.5q0 -20 14 -34t34 -14t34 14t14 34q0 86 61 147t147 61q20 0 34 14t14 34zM1792 576q0 -34 -20 -69q-140 -230 -376.5 -368.5t-499.5 -138.5t-499.5 139t-376.5 368q-20 35 -20 69t20 69q140 229 376.5 368t499.5 139t499.5 -139t376.5 -368q20 -35 20 -69z" />
+<glyph unicode="&#xf070;" horiz-adv-x="1792" d="M555 201l78 141q-87 63 -136 159t-49 203q0 121 61 225q-229 -117 -381 -353q167 -258 427 -375zM944 960q0 20 -14 34t-34 14q-125 0 -214.5 -89.5t-89.5 -214.5q0 -20 14 -34t34 -14t34 14t14 34q0 86 61 147t147 61q20 0 34 14t14 34zM1307 1151q0 -7 -1 -9 q-105 -188 -315 -566t-316 -567l-49 -89q-10 -16 -28 -16q-12 0 -134 70q-16 10 -16 28q0 12 44 87q-143 65 -263.5 173t-208.5 245q-20 31 -20 69t20 69q153 235 380 371t496 136q89 0 180 -17l54 97q10 16 28 16q5 0 18 -6t31 -15.5t33 -18.5t31.5 -18.5t19.5 -11.5 q16 -10 16 -27zM1344 704q0 -139 -79 -253.5t-209 -164.5l280 502q8 -45 8 -84zM1792 576q0 -35 -20 -69q-39 -64 -109 -145q-150 -172 -347.5 -267t-419.5 -95l74 132q212 18 392.5 137t301.5 307q-115 179 -282 294l63 112q95 -64 182.5 -153t144.5 -184q20 -34 20 -69z " />
+<glyph unicode="&#xf071;" horiz-adv-x="1792" d="M1024 161v190q0 14 -9.5 23.5t-22.5 9.5h-192q-13 0 -22.5 -9.5t-9.5 -23.5v-190q0 -14 9.5 -23.5t22.5 -9.5h192q13 0 22.5 9.5t9.5 23.5zM1022 535l18 459q0 12 -10 19q-13 11 -24 11h-220q-11 0 -24 -11q-10 -7 -10 -21l17 -457q0 -10 10 -16.5t24 -6.5h185 q14 0 23.5 6.5t10.5 16.5zM1008 1469l768 -1408q35 -63 -2 -126q-17 -29 -46.5 -46t-63.5 -17h-1536q-34 0 -63.5 17t-46.5 46q-37 63 -2 126l768 1408q17 31 47 49t65 18t65 -18t47 -49z" />
+<glyph unicode="&#xf072;" horiz-adv-x="1408" d="M1376 1376q44 -52 12 -148t-108 -172l-161 -161l160 -696q5 -19 -12 -33l-128 -96q-7 -6 -19 -6q-4 0 -7 1q-15 3 -21 16l-279 508l-259 -259l53 -194q5 -17 -8 -31l-96 -96q-9 -9 -23 -9h-2q-15 2 -24 13l-189 252l-252 189q-11 7 -13 23q-1 13 9 25l96 97q9 9 23 9 q6 0 8 -1l194 -53l259 259l-508 279q-14 8 -17 24q-2 16 9 27l128 128q14 13 30 8l665 -159l160 160q76 76 172 108t148 -12z" />
+<glyph unicode="&#xf073;" horiz-adv-x="1664" d="M128 -128h288v288h-288v-288zM480 -128h320v288h-320v-288zM128 224h288v320h-288v-320zM480 224h320v320h-320v-320zM128 608h288v288h-288v-288zM864 -128h320v288h-320v-288zM480 608h320v288h-320v-288zM1248 -128h288v288h-288v-288zM864 224h320v320h-320v-320z M512 1088v288q0 13 -9.5 22.5t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-288q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5zM1248 224h288v320h-288v-320zM864 608h320v288h-320v-288zM1248 608h288v288h-288v-288zM1280 1088v288q0 13 -9.5 22.5t-22.5 9.5h-64 q-13 0 -22.5 -9.5t-9.5 -22.5v-288q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5zM1664 1152v-1280q0 -52 -38 -90t-90 -38h-1408q-52 0 -90 38t-38 90v1280q0 52 38 90t90 38h128v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h384v96q0 66 47 113t113 47 h64q66 0 113 -47t47 -113v-96h128q52 0 90 -38t38 -90z" />
+<glyph unicode="&#xf074;" horiz-adv-x="1792" d="M666 1055q-60 -92 -137 -273q-22 45 -37 72.5t-40.5 63.5t-51 56.5t-63 35t-81.5 14.5h-224q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h224q250 0 410 -225zM1792 256q0 -14 -9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v192q-32 0 -85 -0.5t-81 -1t-73 1 t-71 5t-64 10.5t-63 18.5t-58 28.5t-59 40t-55 53.5t-56 69.5q59 93 136 273q22 -45 37 -72.5t40.5 -63.5t51 -56.5t63 -35t81.5 -14.5h256v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23zM1792 1152q0 -14 -9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5 v192h-256q-48 0 -87 -15t-69 -45t-51 -61.5t-45 -77.5q-32 -62 -78 -171q-29 -66 -49.5 -111t-54 -105t-64 -100t-74 -83t-90 -68.5t-106.5 -42t-128 -16.5h-224q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h224q48 0 87 15t69 45t51 61.5t45 77.5q32 62 78 171q29 66 49.5 111 t54 105t64 100t74 83t90 68.5t106.5 42t128 16.5h256v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23z" />
+<glyph unicode="&#xf075;" horiz-adv-x="1792" d="M1792 640q0 -174 -120 -321.5t-326 -233t-450 -85.5q-70 0 -145 8q-198 -175 -460 -242q-49 -14 -114 -22q-17 -2 -30.5 9t-17.5 29v1q-3 4 -0.5 12t2 10t4.5 9.5l6 9t7 8.5t8 9q7 8 31 34.5t34.5 38t31 39.5t32.5 51t27 59t26 76q-157 89 -247.5 220t-90.5 281 q0 130 71 248.5t191 204.5t286 136.5t348 50.5q244 0 450 -85.5t326 -233t120 -321.5z" />
+<glyph unicode="&#xf076;" d="M1536 704v-128q0 -201 -98.5 -362t-274 -251.5t-395.5 -90.5t-395.5 90.5t-274 251.5t-98.5 362v128q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-128q0 -52 23.5 -90t53.5 -57t71 -30t64 -13t44 -2t44 2t64 13t71 30t53.5 57t23.5 90v128q0 26 19 45t45 19h384 q26 0 45 -19t19 -45zM512 1344v-384q0 -26 -19 -45t-45 -19h-384q-26 0 -45 19t-19 45v384q0 26 19 45t45 19h384q26 0 45 -19t19 -45zM1536 1344v-384q0 -26 -19 -45t-45 -19h-384q-26 0 -45 19t-19 45v384q0 26 19 45t45 19h384q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf077;" horiz-adv-x="1664" d="M1611 320q0 -53 -37 -90l-75 -75q-38 -38 -91 -38q-54 0 -90 38l-486 485l-486 -485q-36 -38 -90 -38t-90 38l-75 75q-38 36 -38 90q0 53 38 91l651 651q37 37 90 37q52 0 91 -37l650 -651q38 -38 38 -91z" />
+<glyph unicode="&#xf078;" horiz-adv-x="1664" d="M1611 832q0 -53 -37 -90l-651 -651q-38 -38 -91 -38q-54 0 -90 38l-651 651q-38 36 -38 90q0 53 38 91l74 75q39 37 91 37q53 0 90 -37l486 -486l486 486q37 37 90 37q52 0 91 -37l75 -75q37 -39 37 -91z" />
+<glyph unicode="&#xf079;" horiz-adv-x="1920" d="M1280 32q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-8 0 -13.5 2t-9 7t-5.5 8t-3 11.5t-1 11.5v13v11v160v416h-192q-26 0 -45 19t-19 45q0 24 15 41l320 384q19 22 49 22t49 -22l320 -384q15 -17 15 -41q0 -26 -19 -45t-45 -19h-192v-384h576q16 0 25 -11l160 -192q7 -11 7 -21 zM1920 448q0 -24 -15 -41l-320 -384q-20 -23 -49 -23t-49 23l-320 384q-15 17 -15 41q0 26 19 45t45 19h192v384h-576q-16 0 -25 12l-160 192q-7 9 -7 20q0 13 9.5 22.5t22.5 9.5h960q8 0 13.5 -2t9 -7t5.5 -8t3 -11.5t1 -11.5v-13v-11v-160v-416h192q26 0 45 -19t19 -45z " />
+<glyph unicode="&#xf07a;" horiz-adv-x="1664" d="M640 0q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1536 0q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1664 1088v-512q0 -24 -16 -42.5t-41 -21.5 l-1044 -122q1 -7 4.5 -21.5t6 -26.5t2.5 -22q0 -16 -24 -64h920q26 0 45 -19t19 -45t-19 -45t-45 -19h-1024q-26 0 -45 19t-19 45q0 14 11 39.5t29.5 59.5t20.5 38l-177 823h-204q-26 0 -45 19t-19 45t19 45t45 19h256q16 0 28.5 -6.5t20 -15.5t13 -24.5t7.5 -26.5 t5.5 -29.5t4.5 -25.5h1201q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf07b;" horiz-adv-x="1664" d="M1664 928v-704q0 -92 -66 -158t-158 -66h-1216q-92 0 -158 66t-66 158v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h672q92 0 158 -66t66 -158z" />
+<glyph unicode="&#xf07c;" horiz-adv-x="1920" d="M1879 584q0 -31 -31 -66l-336 -396q-43 -51 -120.5 -86.5t-143.5 -35.5h-1088q-34 0 -60.5 13t-26.5 43q0 31 31 66l336 396q43 51 120.5 86.5t143.5 35.5h1088q34 0 60.5 -13t26.5 -43zM1536 928v-160h-832q-94 0 -197 -47.5t-164 -119.5l-337 -396l-5 -6q0 4 -0.5 12.5 t-0.5 12.5v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h544q92 0 158 -66t66 -158z" />
+<glyph unicode="&#xf07d;" horiz-adv-x="768" d="M704 1216q0 -26 -19 -45t-45 -19h-128v-1024h128q26 0 45 -19t19 -45t-19 -45l-256 -256q-19 -19 -45 -19t-45 19l-256 256q-19 19 -19 45t19 45t45 19h128v1024h-128q-26 0 -45 19t-19 45t19 45l256 256q19 19 45 19t45 -19l256 -256q19 -19 19 -45z" />
+<glyph unicode="&#xf07e;" horiz-adv-x="1792" d="M1792 640q0 -26 -19 -45l-256 -256q-19 -19 -45 -19t-45 19t-19 45v128h-1024v-128q0 -26 -19 -45t-45 -19t-45 19l-256 256q-19 19 -19 45t19 45l256 256q19 19 45 19t45 -19t19 -45v-128h1024v128q0 26 19 45t45 19t45 -19l256 -256q19 -19 19 -45z" />
+<glyph unicode="&#xf080;" horiz-adv-x="1920" d="M512 512v-384h-256v384h256zM896 1024v-896h-256v896h256zM1280 768v-640h-256v640h256zM1664 1152v-1024h-256v1024h256zM1792 32v1216q0 13 -9.5 22.5t-22.5 9.5h-1600q-13 0 -22.5 -9.5t-9.5 -22.5v-1216q0 -13 9.5 -22.5t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5z M1920 1248v-1216q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113v1216q0 66 47 113t113 47h1600q66 0 113 -47t47 -113z" />
+<glyph unicode="&#xf081;" d="M1280 926q-56 -25 -121 -34q68 40 93 117q-65 -38 -134 -51q-61 66 -153 66q-87 0 -148.5 -61.5t-61.5 -148.5q0 -29 5 -48q-129 7 -242 65t-192 155q-29 -50 -29 -106q0 -114 91 -175q-47 1 -100 26v-2q0 -75 50 -133.5t123 -72.5q-29 -8 -51 -8q-13 0 -39 4 q21 -63 74.5 -104t121.5 -42q-116 -90 -261 -90q-26 0 -50 3q148 -94 322 -94q112 0 210 35.5t168 95t120.5 137t75 162t24.5 168.5q0 18 -1 27q63 45 105 109zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5 t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf082;" d="M1307 618l23 219h-198v109q0 49 15.5 68.5t71.5 19.5h110v219h-175q-152 0 -218 -72t-66 -213v-131h-131v-219h131v-635h262v635h175zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960 q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf083;" horiz-adv-x="1792" d="M928 704q0 14 -9 23t-23 9q-66 0 -113 -47t-47 -113q0 -14 9 -23t23 -9t23 9t9 23q0 40 28 68t68 28q14 0 23 9t9 23zM1152 574q0 -106 -75 -181t-181 -75t-181 75t-75 181t75 181t181 75t181 -75t75 -181zM128 0h1536v128h-1536v-128zM1280 574q0 159 -112.5 271.5 t-271.5 112.5t-271.5 -112.5t-112.5 -271.5t112.5 -271.5t271.5 -112.5t271.5 112.5t112.5 271.5zM256 1216h384v128h-384v-128zM128 1024h1536v118v138h-828l-64 -128h-644v-128zM1792 1280v-1280q0 -53 -37.5 -90.5t-90.5 -37.5h-1536q-53 0 -90.5 37.5t-37.5 90.5v1280 q0 53 37.5 90.5t90.5 37.5h1536q53 0 90.5 -37.5t37.5 -90.5z" />
+<glyph unicode="&#xf084;" horiz-adv-x="1792" d="M832 1024q0 80 -56 136t-136 56t-136 -56t-56 -136q0 -42 19 -83q-41 19 -83 19q-80 0 -136 -56t-56 -136t56 -136t136 -56t136 56t56 136q0 42 -19 83q41 -19 83 -19q80 0 136 56t56 136zM1683 320q0 -17 -49 -66t-66 -49q-9 0 -28.5 16t-36.5 33t-38.5 40t-24.5 26 l-96 -96l220 -220q28 -28 28 -68q0 -42 -39 -81t-81 -39q-40 0 -68 28l-671 671q-176 -131 -365 -131q-163 0 -265.5 102.5t-102.5 265.5q0 160 95 313t248 248t313 95q163 0 265.5 -102.5t102.5 -265.5q0 -189 -131 -365l355 -355l96 96q-3 3 -26 24.5t-40 38.5t-33 36.5 t-16 28.5q0 17 49 66t66 49q13 0 23 -10q6 -6 46 -44.5t82 -79.5t86.5 -86t73 -78t28.5 -41z" />
+<glyph unicode="&#xf085;" horiz-adv-x="1920" d="M896 640q0 106 -75 181t-181 75t-181 -75t-75 -181t75 -181t181 -75t181 75t75 181zM1664 128q0 52 -38 90t-90 38t-90 -38t-38 -90q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1664 1152q0 52 -38 90t-90 38t-90 -38t-38 -90q0 -53 37.5 -90.5t90.5 -37.5 t90.5 37.5t37.5 90.5zM1280 731v-185q0 -10 -7 -19.5t-16 -10.5l-155 -24q-11 -35 -32 -76q34 -48 90 -115q7 -10 7 -20q0 -12 -7 -19q-23 -30 -82.5 -89.5t-78.5 -59.5q-11 0 -21 7l-115 90q-37 -19 -77 -31q-11 -108 -23 -155q-7 -24 -30 -24h-186q-11 0 -20 7.5t-10 17.5 l-23 153q-34 10 -75 31l-118 -89q-7 -7 -20 -7q-11 0 -21 8q-144 133 -144 160q0 9 7 19q10 14 41 53t47 61q-23 44 -35 82l-152 24q-10 1 -17 9.5t-7 19.5v185q0 10 7 19.5t16 10.5l155 24q11 35 32 76q-34 48 -90 115q-7 11 -7 20q0 12 7 20q22 30 82 89t79 59q11 0 21 -7 l115 -90q34 18 77 32q11 108 23 154q7 24 30 24h186q11 0 20 -7.5t10 -17.5l23 -153q34 -10 75 -31l118 89q8 7 20 7q11 0 21 -8q144 -133 144 -160q0 -9 -7 -19q-12 -16 -42 -54t-45 -60q23 -48 34 -82l152 -23q10 -2 17 -10.5t7 -19.5zM1920 198v-140q0 -16 -149 -31 q-12 -27 -30 -52q51 -113 51 -138q0 -4 -4 -7q-122 -71 -124 -71q-8 0 -46 47t-52 68q-20 -2 -30 -2t-30 2q-14 -21 -52 -68t-46 -47q-2 0 -124 71q-4 3 -4 7q0 25 51 138q-18 25 -30 52q-149 15 -149 31v140q0 16 149 31q13 29 30 52q-51 113 -51 138q0 4 4 7q4 2 35 20 t59 34t30 16q8 0 46 -46.5t52 -67.5q20 2 30 2t30 -2q51 71 92 112l6 2q4 0 124 -70q4 -3 4 -7q0 -25 -51 -138q17 -23 30 -52q149 -15 149 -31zM1920 1222v-140q0 -16 -149 -31q-12 -27 -30 -52q51 -113 51 -138q0 -4 -4 -7q-122 -71 -124 -71q-8 0 -46 47t-52 68 q-20 -2 -30 -2t-30 2q-14 -21 -52 -68t-46 -47q-2 0 -124 71q-4 3 -4 7q0 25 51 138q-18 25 -30 52q-149 15 -149 31v140q0 16 149 31q13 29 30 52q-51 113 -51 138q0 4 4 7q4 2 35 20t59 34t30 16q8 0 46 -46.5t52 -67.5q20 2 30 2t30 -2q51 71 92 112l6 2q4 0 124 -70 q4 -3 4 -7q0 -25 -51 -138q17 -23 30 -52q149 -15 149 -31z" />
+<glyph unicode="&#xf086;" horiz-adv-x="1792" d="M1408 768q0 -139 -94 -257t-256.5 -186.5t-353.5 -68.5q-86 0 -176 16q-124 -88 -278 -128q-36 -9 -86 -16h-3q-11 0 -20.5 8t-11.5 21q-1 3 -1 6.5t0.5 6.5t2 6l2.5 5t3.5 5.5t4 5t4.5 5t4 4.5q5 6 23 25t26 29.5t22.5 29t25 38.5t20.5 44q-124 72 -195 177t-71 224 q0 139 94 257t256.5 186.5t353.5 68.5t353.5 -68.5t256.5 -186.5t94 -257zM1792 512q0 -120 -71 -224.5t-195 -176.5q10 -24 20.5 -44t25 -38.5t22.5 -29t26 -29.5t23 -25q1 -1 4 -4.5t4.5 -5t4 -5t3.5 -5.5l2.5 -5t2 -6t0.5 -6.5t-1 -6.5q-3 -14 -13 -22t-22 -7 q-50 7 -86 16q-154 40 -278 128q-90 -16 -176 -16q-271 0 -472 132q58 -4 88 -4q161 0 309 45t264 129q125 92 192 212t67 254q0 77 -23 152q129 -71 204 -178t75 -230z" />
+<glyph unicode="&#xf087;" d="M256 192q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1408 768q0 51 -39 89.5t-89 38.5h-352q0 58 48 159.5t48 160.5q0 98 -32 145t-128 47q-26 -26 -38 -85t-30.5 -125.5t-59.5 -109.5q-22 -23 -77 -91q-4 -5 -23 -30t-31.5 -41t-34.5 -42.5 t-40 -44t-38.5 -35.5t-40 -27t-35.5 -9h-32v-640h32q13 0 31.5 -3t33 -6.5t38 -11t35 -11.5t35.5 -12.5t29 -10.5q211 -73 342 -73h121q192 0 192 167q0 26 -5 56q30 16 47.5 52.5t17.5 73.5t-18 69q53 50 53 119q0 25 -10 55.5t-25 47.5q32 1 53.5 47t21.5 81zM1536 769 q0 -89 -49 -163q9 -33 9 -69q0 -77 -38 -144q3 -21 3 -43q0 -101 -60 -178q1 -139 -85 -219.5t-227 -80.5h-36h-93q-96 0 -189.5 22.5t-216.5 65.5q-116 40 -138 40h-288q-53 0 -90.5 37.5t-37.5 90.5v640q0 53 37.5 90.5t90.5 37.5h274q36 24 137 155q58 75 107 128 q24 25 35.5 85.5t30.5 126.5t62 108q39 37 90 37q84 0 151 -32.5t102 -101.5t35 -186q0 -93 -48 -192h176q104 0 180 -76t76 -179z" />
+<glyph unicode="&#xf088;" d="M256 1088q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1408 512q0 35 -21.5 81t-53.5 47q15 17 25 47.5t10 55.5q0 69 -53 119q18 32 18 69t-17.5 73.5t-47.5 52.5q5 30 5 56q0 85 -49 126t-136 41h-128q-131 0 -342 -73q-5 -2 -29 -10.5 t-35.5 -12.5t-35 -11.5t-38 -11t-33 -6.5t-31.5 -3h-32v-640h32q16 0 35.5 -9t40 -27t38.5 -35.5t40 -44t34.5 -42.5t31.5 -41t23 -30q55 -68 77 -91q41 -43 59.5 -109.5t30.5 -125.5t38 -85q96 0 128 47t32 145q0 59 -48 160.5t-48 159.5h352q50 0 89 38.5t39 89.5z M1536 511q0 -103 -76 -179t-180 -76h-176q48 -99 48 -192q0 -118 -35 -186q-35 -69 -102 -101.5t-151 -32.5q-51 0 -90 37q-34 33 -54 82t-25.5 90.5t-17.5 84.5t-31 64q-48 50 -107 127q-101 131 -137 155h-274q-53 0 -90.5 37.5t-37.5 90.5v640q0 53 37.5 90.5t90.5 37.5 h288q22 0 138 40q128 44 223 66t200 22h112q140 0 226.5 -79t85.5 -216v-5q60 -77 60 -178q0 -22 -3 -43q38 -67 38 -144q0 -36 -9 -69q49 -74 49 -163z" />
+<glyph unicode="&#xf089;" horiz-adv-x="896" d="M832 1504v-1339l-449 -236q-22 -12 -40 -12q-21 0 -31.5 14.5t-10.5 35.5q0 6 2 20l86 500l-364 354q-25 27 -25 48q0 37 56 46l502 73l225 455q19 41 49 41z" />
+<glyph unicode="&#xf08a;" horiz-adv-x="1792" d="M1664 940q0 81 -21.5 143t-55 98.5t-81.5 59.5t-94 31t-98 8t-112 -25.5t-110.5 -64t-86.5 -72t-60 -61.5q-18 -22 -49 -22t-49 22q-24 28 -60 61.5t-86.5 72t-110.5 64t-112 25.5t-98 -8t-94 -31t-81.5 -59.5t-55 -98.5t-21.5 -143q0 -168 187 -355l581 -560l580 559 q188 188 188 356zM1792 940q0 -221 -229 -450l-623 -600q-18 -18 -44 -18t-44 18l-624 602q-10 8 -27.5 26t-55.5 65.5t-68 97.5t-53.5 121t-23.5 138q0 220 127 344t351 124q62 0 126.5 -21.5t120 -58t95.5 -68.5t76 -68q36 36 76 68t95.5 68.5t120 58t126.5 21.5 q224 0 351 -124t127 -344z" />
+<glyph unicode="&#xf08b;" horiz-adv-x="1664" d="M640 96q0 -4 1 -20t0.5 -26.5t-3 -23.5t-10 -19.5t-20.5 -6.5h-320q-119 0 -203.5 84.5t-84.5 203.5v704q0 119 84.5 203.5t203.5 84.5h320q13 0 22.5 -9.5t9.5 -22.5q0 -4 1 -20t0.5 -26.5t-3 -23.5t-10 -19.5t-20.5 -6.5h-320q-66 0 -113 -47t-47 -113v-704 q0 -66 47 -113t113 -47h288h11h13t11.5 -1t11.5 -3t8 -5.5t7 -9t2 -13.5zM1568 640q0 -26 -19 -45l-544 -544q-19 -19 -45 -19t-45 19t-19 45v288h-448q-26 0 -45 19t-19 45v384q0 26 19 45t45 19h448v288q0 26 19 45t45 19t45 -19l544 -544q19 -19 19 -45z" />
+<glyph unicode="&#xf08c;" d="M237 122h231v694h-231v-694zM483 1030q-1 52 -36 86t-93 34t-94.5 -34t-36.5 -86q0 -51 35.5 -85.5t92.5 -34.5h1q59 0 95 34.5t36 85.5zM1068 122h231v398q0 154 -73 233t-193 79q-136 0 -209 -117h2v101h-231q3 -66 0 -694h231v388q0 38 7 56q15 35 45 59.5t74 24.5 q116 0 116 -157v-371zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf08d;" horiz-adv-x="1152" d="M480 672v448q0 14 -9 23t-23 9t-23 -9t-9 -23v-448q0 -14 9 -23t23 -9t23 9t9 23zM1152 320q0 -26 -19 -45t-45 -19h-429l-51 -483q-2 -12 -10.5 -20.5t-20.5 -8.5h-1q-27 0 -32 27l-76 485h-404q-26 0 -45 19t-19 45q0 123 78.5 221.5t177.5 98.5v512q-52 0 -90 38 t-38 90t38 90t90 38h640q52 0 90 -38t38 -90t-38 -90t-90 -38v-512q99 0 177.5 -98.5t78.5 -221.5z" />
+<glyph unicode="&#xf08e;" horiz-adv-x="1792" d="M1408 608v-320q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5v832q0 119 84.5 203.5t203.5 84.5h704q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-704q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v320 q0 14 9 23t23 9h64q14 0 23 -9t9 -23zM1792 1472v-512q0 -26 -19 -45t-45 -19t-45 19l-176 176l-652 -652q-10 -10 -23 -10t-23 10l-114 114q-10 10 -10 23t10 23l652 652l-176 176q-19 19 -19 45t19 45t45 19h512q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf090;" d="M1184 640q0 -26 -19 -45l-544 -544q-19 -19 -45 -19t-45 19t-19 45v288h-448q-26 0 -45 19t-19 45v384q0 26 19 45t45 19h448v288q0 26 19 45t45 19t45 -19l544 -544q19 -19 19 -45zM1536 992v-704q0 -119 -84.5 -203.5t-203.5 -84.5h-320q-13 0 -22.5 9.5t-9.5 22.5 q0 4 -1 20t-0.5 26.5t3 23.5t10 19.5t20.5 6.5h320q66 0 113 47t47 113v704q0 66 -47 113t-113 47h-288h-11h-13t-11.5 1t-11.5 3t-8 5.5t-7 9t-2 13.5q0 4 -1 20t-0.5 26.5t3 23.5t10 19.5t20.5 6.5h320q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf091;" horiz-adv-x="1664" d="M458 653q-74 162 -74 371h-256v-96q0 -78 94.5 -162t235.5 -113zM1536 928v96h-256q0 -209 -74 -371q141 29 235.5 113t94.5 162zM1664 1056v-128q0 -71 -41.5 -143t-112 -130t-173 -97.5t-215.5 -44.5q-42 -54 -95 -95q-38 -34 -52.5 -72.5t-14.5 -89.5q0 -54 30.5 -91 t97.5 -37q75 0 133.5 -45.5t58.5 -114.5v-64q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9t-9 23v64q0 69 58.5 114.5t133.5 45.5q67 0 97.5 37t30.5 91q0 51 -14.5 89.5t-52.5 72.5q-53 41 -95 95q-113 5 -215.5 44.5t-173 97.5t-112 130t-41.5 143v128q0 40 28 68t68 28h288v96 q0 66 47 113t113 47h576q66 0 113 -47t47 -113v-96h288q40 0 68 -28t28 -68z" />
+<glyph unicode="&#xf092;" d="M394 184q-8 -9 -20 3q-13 11 -4 19q8 9 20 -3q12 -11 4 -19zM352 245q9 -12 0 -19q-8 -6 -17 7t0 18q9 7 17 -6zM291 305q-5 -7 -13 -2q-10 5 -7 12q3 5 13 2q10 -5 7 -12zM322 271q-6 -7 -16 3q-9 11 -2 16q6 6 16 -3q9 -11 2 -16zM451 159q-4 -12 -19 -6q-17 4 -13 15 t19 7q16 -5 13 -16zM514 154q0 -11 -16 -11q-17 -2 -17 11q0 11 16 11q17 2 17 -11zM572 164q2 -10 -14 -14t-18 8t14 15q16 2 18 -9zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-224q-16 0 -24.5 1t-19.5 5t-16 14.5t-5 27.5v239q0 97 -52 142q57 6 102.5 18t94 39 t81 66.5t53 105t20.5 150.5q0 121 -79 206q37 91 -8 204q-28 9 -81 -11t-92 -44l-38 -24q-93 26 -192 26t-192 -26q-16 11 -42.5 27t-83.5 38.5t-86 13.5q-44 -113 -7 -204q-79 -85 -79 -206q0 -85 20.5 -150t52.5 -105t80.5 -67t94 -39t102.5 -18q-40 -36 -49 -103 q-21 -10 -45 -15t-57 -5t-65.5 21.5t-55.5 62.5q-19 32 -48.5 52t-49.5 24l-20 3q-21 0 -29 -4.5t-5 -11.5t9 -14t13 -12l7 -5q22 -10 43.5 -38t31.5 -51l10 -23q13 -38 44 -61.5t67 -30t69.5 -7t55.5 3.5l23 4q0 -38 0.5 -103t0.5 -68q0 -22 -11 -33.5t-22 -13t-33 -1.5 h-224q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf093;" horiz-adv-x="1664" d="M1280 64q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1536 64q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1664 288v-320q0 -40 -28 -68t-68 -28h-1472q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h427q21 -56 70.5 -92 t110.5 -36h256q61 0 110.5 36t70.5 92h427q40 0 68 -28t28 -68zM1339 936q-17 -40 -59 -40h-256v-448q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45v448h-256q-42 0 -59 40q-17 39 14 69l448 448q18 19 45 19t45 -19l448 -448q31 -30 14 -69z" />
+<glyph unicode="&#xf094;" d="M1407 710q0 44 -7 113.5t-18 96.5q-12 30 -17 44t-9 36.5t-4 48.5q0 23 5 68.5t5 67.5q0 37 -10 55q-4 1 -13 1q-19 0 -58 -4.5t-59 -4.5q-60 0 -176 24t-175 24q-43 0 -94.5 -11.5t-85 -23.5t-89.5 -34q-137 -54 -202 -103q-96 -73 -159.5 -189.5t-88 -236t-24.5 -248.5 q0 -40 12.5 -120t12.5 -121q0 -23 -11 -66.5t-11 -65.5t12 -36.5t34 -14.5q24 0 72.5 11t73.5 11q57 0 169.5 -15.5t169.5 -15.5q181 0 284 36q129 45 235.5 152.5t166 245.5t59.5 275zM1535 712q0 -165 -70 -327.5t-196 -288t-281 -180.5q-124 -44 -326 -44 q-57 0 -170 14.5t-169 14.5q-24 0 -72.5 -14.5t-73.5 -14.5q-73 0 -123.5 55.5t-50.5 128.5q0 24 11 68t11 67q0 40 -12.5 120.5t-12.5 121.5q0 111 18 217.5t54.5 209.5t100.5 194t150 156q78 59 232 120q194 78 316 78q60 0 175.5 -24t173.5 -24q19 0 57 5t58 5 q81 0 118 -50.5t37 -134.5q0 -23 -5 -68t-5 -68q0 -10 1 -18.5t3 -17t4 -13.5t6.5 -16t6.5 -17q16 -40 25 -118.5t9 -136.5z" />
+<glyph unicode="&#xf095;" horiz-adv-x="1408" d="M1408 296q0 -27 -10 -70.5t-21 -68.5q-21 -50 -122 -106q-94 -51 -186 -51q-27 0 -52.5 3.5t-57.5 12.5t-47.5 14.5t-55.5 20.5t-49 18q-98 35 -175 83q-128 79 -264.5 215.5t-215.5 264.5q-48 77 -83 175q-3 9 -18 49t-20.5 55.5t-14.5 47.5t-12.5 57.5t-3.5 52.5 q0 92 51 186q56 101 106 122q25 11 68.5 21t70.5 10q14 0 21 -3q18 -6 53 -76q11 -19 30 -54t35 -63.5t31 -53.5q3 -4 17.5 -25t21.5 -35.5t7 -28.5q0 -20 -28.5 -50t-62 -55t-62 -53t-28.5 -46q0 -9 5 -22.5t8.5 -20.5t14 -24t11.5 -19q76 -137 174 -235t235 -174 q2 -1 19 -11.5t24 -14t20.5 -8.5t22.5 -5q18 0 46 28.5t53 62t55 62t50 28.5q14 0 28.5 -7t35.5 -21.5t25 -17.5q25 -15 53.5 -31t63.5 -35t54 -30q70 -35 76 -53q3 -7 3 -21z" />
+<glyph unicode="&#xf096;" horiz-adv-x="1408" d="M1120 1280h-832q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v832q0 66 -47 113t-113 47zM1408 1120v-832q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5v832q0 119 84.5 203.5t203.5 84.5h832 q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf097;" horiz-adv-x="1280" d="M1152 1280h-1024v-1242l423 406l89 85l89 -85l423 -406v1242zM1164 1408q23 0 44 -9q33 -13 52.5 -41t19.5 -62v-1289q0 -34 -19.5 -62t-52.5 -41q-19 -8 -44 -8q-48 0 -83 32l-441 424l-441 -424q-36 -33 -83 -33q-23 0 -44 9q-33 13 -52.5 41t-19.5 62v1289 q0 34 19.5 62t52.5 41q21 9 44 9h1048z" />
+<glyph unicode="&#xf098;" d="M1280 343q0 11 -2 16q-3 8 -38.5 29.5t-88.5 49.5l-53 29q-5 3 -19 13t-25 15t-21 5q-18 0 -47 -32.5t-57 -65.5t-44 -33q-7 0 -16.5 3.5t-15.5 6.5t-17 9.5t-14 8.5q-99 55 -170.5 126.5t-126.5 170.5q-2 3 -8.5 14t-9.5 17t-6.5 15.5t-3.5 16.5q0 13 20.5 33.5t45 38.5 t45 39.5t20.5 36.5q0 10 -5 21t-15 25t-13 19q-3 6 -15 28.5t-25 45.5t-26.5 47.5t-25 40.5t-16.5 18t-16 2q-48 0 -101 -22q-46 -21 -80 -94.5t-34 -130.5q0 -16 2.5 -34t5 -30.5t9 -33t10 -29.5t12.5 -33t11 -30q60 -164 216.5 -320.5t320.5 -216.5q6 -2 30 -11t33 -12.5 t29.5 -10t33 -9t30.5 -5t34 -2.5q57 0 130.5 34t94.5 80q22 53 22 101zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf099;" horiz-adv-x="1664" d="M1620 1128q-67 -98 -162 -167q1 -14 1 -42q0 -130 -38 -259.5t-115.5 -248.5t-184.5 -210.5t-258 -146t-323 -54.5q-271 0 -496 145q35 -4 78 -4q225 0 401 138q-105 2 -188 64.5t-114 159.5q33 -5 61 -5q43 0 85 11q-112 23 -185.5 111.5t-73.5 205.5v4q68 -38 146 -41 q-66 44 -105 115t-39 154q0 88 44 163q121 -149 294.5 -238.5t371.5 -99.5q-8 38 -8 74q0 134 94.5 228.5t228.5 94.5q140 0 236 -102q109 21 205 78q-37 -115 -142 -178q93 10 186 50z" />
+<glyph unicode="&#xf09a;" horiz-adv-x="768" d="M511 980h257l-30 -284h-227v-824h-341v824h-170v284h170v171q0 182 86 275.5t283 93.5h227v-284h-142q-39 0 -62.5 -6.5t-34 -23.5t-13.5 -34.5t-3 -49.5v-142z" />
+<glyph unicode="&#xf09b;" d="M1536 640q0 -251 -146.5 -451.5t-378.5 -277.5q-27 -5 -39.5 7t-12.5 30v211q0 97 -52 142q57 6 102.5 18t94 39t81 66.5t53 105t20.5 150.5q0 121 -79 206q37 91 -8 204q-28 9 -81 -11t-92 -44l-38 -24q-93 26 -192 26t-192 -26q-16 11 -42.5 27t-83.5 38.5t-86 13.5 q-44 -113 -7 -204q-79 -85 -79 -206q0 -85 20.5 -150t52.5 -105t80.5 -67t94 -39t102.5 -18q-40 -36 -49 -103q-21 -10 -45 -15t-57 -5t-65.5 21.5t-55.5 62.5q-19 32 -48.5 52t-49.5 24l-20 3q-21 0 -29 -4.5t-5 -11.5t9 -14t13 -12l7 -5q22 -10 43.5 -38t31.5 -51l10 -23 q13 -38 44 -61.5t67 -30t69.5 -7t55.5 3.5l23 4q0 -38 0.5 -89t0.5 -54q0 -18 -13 -30t-40 -7q-232 77 -378.5 277.5t-146.5 451.5q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf09c;" horiz-adv-x="1664" d="M1664 960v-256q0 -26 -19 -45t-45 -19h-64q-26 0 -45 19t-19 45v256q0 106 -75 181t-181 75t-181 -75t-75 -181v-192h96q40 0 68 -28t28 -68v-576q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v576q0 40 28 68t68 28h672v192q0 185 131.5 316.5t316.5 131.5 t316.5 -131.5t131.5 -316.5z" />
+<glyph unicode="&#xf09d;" horiz-adv-x="1920" d="M1760 1408q66 0 113 -47t47 -113v-1216q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113v1216q0 66 47 113t113 47h1600zM160 1280q-13 0 -22.5 -9.5t-9.5 -22.5v-224h1664v224q0 13 -9.5 22.5t-22.5 9.5h-1600zM1760 0q13 0 22.5 9.5t9.5 22.5v608h-1664v-608 q0 -13 9.5 -22.5t22.5 -9.5h1600zM256 128v128h256v-128h-256zM640 128v128h384v-128h-384z" />
+<glyph unicode="&#xf09e;" horiz-adv-x="1408" d="M384 192q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM896 69q2 -28 -17 -48q-18 -21 -47 -21h-135q-25 0 -43 16.5t-20 41.5q-22 229 -184.5 391.5t-391.5 184.5q-25 2 -41.5 20t-16.5 43v135q0 29 21 47q17 17 43 17h5q160 -13 306 -80.5 t259 -181.5q114 -113 181.5 -259t80.5 -306zM1408 67q2 -27 -18 -47q-18 -20 -46 -20h-143q-26 0 -44.5 17.5t-19.5 42.5q-12 215 -101 408.5t-231.5 336t-336 231.5t-408.5 102q-25 1 -42.5 19.5t-17.5 43.5v143q0 28 20 46q18 18 44 18h3q262 -13 501.5 -120t425.5 -294 q187 -186 294 -425.5t120 -501.5z" />
+<glyph unicode="&#xf0a0;" d="M1040 320q0 -33 -23.5 -56.5t-56.5 -23.5t-56.5 23.5t-23.5 56.5t23.5 56.5t56.5 23.5t56.5 -23.5t23.5 -56.5zM1296 320q0 -33 -23.5 -56.5t-56.5 -23.5t-56.5 23.5t-23.5 56.5t23.5 56.5t56.5 23.5t56.5 -23.5t23.5 -56.5zM1408 160v320q0 13 -9.5 22.5t-22.5 9.5 h-1216q-13 0 -22.5 -9.5t-9.5 -22.5v-320q0 -13 9.5 -22.5t22.5 -9.5h1216q13 0 22.5 9.5t9.5 22.5zM178 640h1180l-157 482q-4 13 -16 21.5t-26 8.5h-782q-14 0 -26 -8.5t-16 -21.5zM1536 480v-320q0 -66 -47 -113t-113 -47h-1216q-66 0 -113 47t-47 113v320q0 25 16 75 l197 606q17 53 63 86t101 33h782q55 0 101 -33t63 -86l197 -606q16 -50 16 -75z" />
+<glyph unicode="&#xf0a1;" horiz-adv-x="1792" d="M1664 896q53 0 90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5v-384q0 -52 -38 -90t-90 -38q-417 347 -812 380q-58 -19 -91 -66t-31 -100.5t40 -92.5q-20 -33 -23 -65.5t6 -58t33.5 -55t48 -50t61.5 -50.5q-29 -58 -111.5 -83t-168.5 -11.5t-132 55.5q-7 23 -29.5 87.5 t-32 94.5t-23 89t-15 101t3.5 98.5t22 110.5h-122q-66 0 -113 47t-47 113v192q0 66 47 113t113 47h480q435 0 896 384q52 0 90 -38t38 -90v-384zM1536 292v954q-394 -302 -768 -343v-270q377 -42 768 -341z" />
+<glyph unicode="&#xf0a2;" horiz-adv-x="1664" d="M848 -160q0 16 -16 16q-59 0 -101.5 42.5t-42.5 101.5q0 16 -16 16t-16 -16q0 -73 51.5 -124.5t124.5 -51.5q16 0 16 16zM183 128h1298q-164 181 -246.5 411.5t-82.5 484.5q0 256 -320 256t-320 -256q0 -254 -82.5 -484.5t-246.5 -411.5zM1664 128q0 -52 -38 -90t-90 -38 h-448q0 -106 -75 -181t-181 -75t-181 75t-75 181h-448q-52 0 -90 38t-38 90q190 161 287 397.5t97 498.5q0 165 96 262t264 117q-8 18 -8 37q0 40 28 68t68 28t68 -28t28 -68q0 -19 -8 -37q168 -20 264 -117t96 -262q0 -262 97 -498.5t287 -397.5z" />
+<glyph unicode="&#xf0a3;" d="M1376 640l138 -135q30 -28 20 -70q-12 -41 -52 -51l-188 -48l53 -186q12 -41 -19 -70q-29 -31 -70 -19l-186 53l-48 -188q-10 -40 -51 -52q-12 -2 -19 -2q-31 0 -51 22l-135 138l-135 -138q-28 -30 -70 -20q-41 11 -51 52l-48 188l-186 -53q-41 -12 -70 19q-31 29 -19 70 l53 186l-188 48q-40 10 -52 51q-10 42 20 70l138 135l-138 135q-30 28 -20 70q12 41 52 51l188 48l-53 186q-12 41 19 70q29 31 70 19l186 -53l48 188q10 41 51 51q41 12 70 -19l135 -139l135 139q29 30 70 19q41 -10 51 -51l48 -188l186 53q41 12 70 -19q31 -29 19 -70 l-53 -186l188 -48q40 -10 52 -51q10 -42 -20 -70z" />
+<glyph unicode="&#xf0a4;" horiz-adv-x="1792" d="M256 192q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1664 768q0 51 -39 89.5t-89 38.5h-576q0 20 15 48.5t33 55t33 68t15 84.5q0 67 -44.5 97.5t-115.5 30.5q-24 0 -90 -139q-24 -44 -37 -65q-40 -64 -112 -145q-71 -81 -101 -106 q-69 -57 -140 -57h-32v-640h32q72 0 167 -32t193.5 -64t179.5 -32q189 0 189 167q0 26 -5 56q30 16 47.5 52.5t17.5 73.5t-18 69q53 50 53 119q0 25 -10 55.5t-25 47.5h331q52 0 90 38t38 90zM1792 769q0 -105 -75.5 -181t-180.5 -76h-169q-4 -62 -37 -119q3 -21 3 -43 q0 -101 -60 -178q1 -139 -85 -219.5t-227 -80.5q-133 0 -322 69q-164 59 -223 59h-288q-53 0 -90.5 37.5t-37.5 90.5v640q0 53 37.5 90.5t90.5 37.5h288q10 0 21.5 4.5t23.5 14t22.5 18t24 22.5t20.5 21.5t19 21.5t14 17q65 74 100 129q13 21 33 62t37 72t40.5 63t55 49.5 t69.5 17.5q125 0 206.5 -67t81.5 -189q0 -68 -22 -128h374q104 0 180 -76t76 -179z" />
+<glyph unicode="&#xf0a5;" horiz-adv-x="1792" d="M1376 128h32v640h-32q-35 0 -67.5 12t-62.5 37t-50 46t-49 54q-2 3 -3.5 4.5t-4 4.5t-4.5 5q-72 81 -112 145q-14 22 -38 68q-1 3 -10.5 22.5t-18.5 36t-20 35.5t-21.5 30.5t-18.5 11.5q-71 0 -115.5 -30.5t-44.5 -97.5q0 -43 15 -84.5t33 -68t33 -55t15 -48.5h-576 q-50 0 -89 -38.5t-39 -89.5q0 -52 38 -90t90 -38h331q-15 -17 -25 -47.5t-10 -55.5q0 -69 53 -119q-18 -32 -18 -69t17.5 -73.5t47.5 -52.5q-4 -24 -4 -56q0 -85 48.5 -126t135.5 -41q84 0 183 32t194 64t167 32zM1664 192q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45 t45 -19t45 19t19 45zM1792 768v-640q0 -53 -37.5 -90.5t-90.5 -37.5h-288q-59 0 -223 -59q-190 -69 -317 -69q-142 0 -230 77.5t-87 217.5l1 5q-61 76 -61 178q0 22 3 43q-33 57 -37 119h-169q-105 0 -180.5 76t-75.5 181q0 103 76 179t180 76h374q-22 60 -22 128 q0 122 81.5 189t206.5 67q38 0 69.5 -17.5t55 -49.5t40.5 -63t37 -72t33 -62q35 -55 100 -129q2 -3 14 -17t19 -21.5t20.5 -21.5t24 -22.5t22.5 -18t23.5 -14t21.5 -4.5h288q53 0 90.5 -37.5t37.5 -90.5z" />
+<glyph unicode="&#xf0a6;" d="M1280 -64q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1408 700q0 189 -167 189q-26 0 -56 -5q-16 30 -52.5 47.5t-73.5 17.5t-69 -18q-50 53 -119 53q-25 0 -55.5 -10t-47.5 -25v331q0 52 -38 90t-90 38q-51 0 -89.5 -39t-38.5 -89v-576 q-20 0 -48.5 15t-55 33t-68 33t-84.5 15q-67 0 -97.5 -44.5t-30.5 -115.5q0 -24 139 -90q44 -24 65 -37q64 -40 145 -112q81 -71 106 -101q57 -69 57 -140v-32h640v32q0 72 32 167t64 193.5t32 179.5zM1536 705q0 -133 -69 -322q-59 -164 -59 -223v-288q0 -53 -37.5 -90.5 t-90.5 -37.5h-640q-53 0 -90.5 37.5t-37.5 90.5v288q0 10 -4.5 21.5t-14 23.5t-18 22.5t-22.5 24t-21.5 20.5t-21.5 19t-17 14q-74 65 -129 100q-21 13 -62 33t-72 37t-63 40.5t-49.5 55t-17.5 69.5q0 125 67 206.5t189 81.5q68 0 128 -22v374q0 104 76 180t179 76 q105 0 181 -75.5t76 -180.5v-169q62 -4 119 -37q21 3 43 3q101 0 178 -60q139 1 219.5 -85t80.5 -227z" />
+<glyph unicode="&#xf0a7;" d="M1408 576q0 84 -32 183t-64 194t-32 167v32h-640v-32q0 -35 -12 -67.5t-37 -62.5t-46 -50t-54 -49q-9 -8 -14 -12q-81 -72 -145 -112q-22 -14 -68 -38q-3 -1 -22.5 -10.5t-36 -18.5t-35.5 -20t-30.5 -21.5t-11.5 -18.5q0 -71 30.5 -115.5t97.5 -44.5q43 0 84.5 15t68 33 t55 33t48.5 15v-576q0 -50 38.5 -89t89.5 -39q52 0 90 38t38 90v331q46 -35 103 -35q69 0 119 53q32 -18 69 -18t73.5 17.5t52.5 47.5q24 -4 56 -4q85 0 126 48.5t41 135.5zM1280 1344q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1536 580 q0 -142 -77.5 -230t-217.5 -87l-5 1q-76 -61 -178 -61q-22 0 -43 3q-54 -30 -119 -37v-169q0 -105 -76 -180.5t-181 -75.5q-103 0 -179 76t-76 180v374q-54 -22 -128 -22q-121 0 -188.5 81.5t-67.5 206.5q0 38 17.5 69.5t49.5 55t63 40.5t72 37t62 33q55 35 129 100 q3 2 17 14t21.5 19t21.5 20.5t22.5 24t18 22.5t14 23.5t4.5 21.5v288q0 53 37.5 90.5t90.5 37.5h640q53 0 90.5 -37.5t37.5 -90.5v-288q0 -59 59 -223q69 -190 69 -317z" />
+<glyph unicode="&#xf0a8;" d="M1280 576v128q0 26 -19 45t-45 19h-502l189 189q19 19 19 45t-19 45l-91 91q-18 18 -45 18t-45 -18l-362 -362l-91 -91q-18 -18 -18 -45t18 -45l91 -91l362 -362q18 -18 45 -18t45 18l91 91q18 18 18 45t-18 45l-189 189h502q26 0 45 19t19 45zM1536 640 q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf0a9;" d="M1285 640q0 27 -18 45l-91 91l-362 362q-18 18 -45 18t-45 -18l-91 -91q-18 -18 -18 -45t18 -45l189 -189h-502q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h502l-189 -189q-19 -19 -19 -45t19 -45l91 -91q18 -18 45 -18t45 18l362 362l91 91q18 18 18 45zM1536 640 q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf0aa;" d="M1284 641q0 27 -18 45l-362 362l-91 91q-18 18 -45 18t-45 -18l-91 -91l-362 -362q-18 -18 -18 -45t18 -45l91 -91q18 -18 45 -18t45 18l189 189v-502q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v502l189 -189q19 -19 45 -19t45 19l91 91q18 18 18 45zM1536 640 q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf0ab;" d="M1284 639q0 27 -18 45l-91 91q-18 18 -45 18t-45 -18l-189 -189v502q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-502l-189 189q-19 19 -45 19t-45 -19l-91 -91q-18 -18 -18 -45t18 -45l362 -362l91 -91q18 -18 45 -18t45 18l91 91l362 362q18 18 18 45zM1536 640 q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf0ac;" d="M768 1408q209 0 385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103zM1042 887q-2 -1 -9.5 -9.5t-13.5 -9.5q2 0 4.5 5t5 11t3.5 7q6 7 22 15q14 6 52 12q34 8 51 -11 q-2 2 9.5 13t14.5 12q3 2 15 4.5t15 7.5l2 22q-12 -1 -17.5 7t-6.5 21q0 -2 -6 -8q0 7 -4.5 8t-11.5 -1t-9 -1q-10 3 -15 7.5t-8 16.5t-4 15q-2 5 -9.5 10.5t-9.5 10.5q-1 2 -2.5 5.5t-3 6.5t-4 5.5t-5.5 2.5t-7 -5t-7.5 -10t-4.5 -5q-3 2 -6 1.5t-4.5 -1t-4.5 -3t-5 -3.5 q-3 -2 -8.5 -3t-8.5 -2q15 5 -1 11q-10 4 -16 3q9 4 7.5 12t-8.5 14h5q-1 4 -8.5 8.5t-17.5 8.5t-13 6q-8 5 -34 9.5t-33 0.5q-5 -6 -4.5 -10.5t4 -14t3.5 -12.5q1 -6 -5.5 -13t-6.5 -12q0 -7 14 -15.5t10 -21.5q-3 -8 -16 -16t-16 -12q-5 -8 -1.5 -18.5t10.5 -16.5 q2 -2 1.5 -4t-3.5 -4.5t-5.5 -4t-6.5 -3.5l-3 -2q-11 -5 -20.5 6t-13.5 26q-7 25 -16 30q-23 8 -29 -1q-5 13 -41 26q-25 9 -58 4q6 1 0 15q-7 15 -19 12q3 6 4 17.5t1 13.5q3 13 12 23q1 1 7 8.5t9.5 13.5t0.5 6q35 -4 50 11q5 5 11.5 17t10.5 17q9 6 14 5.5t14.5 -5.5 t14.5 -5q14 -1 15.5 11t-7.5 20q12 -1 3 17q-5 7 -8 9q-12 4 -27 -5q-8 -4 2 -8q-1 1 -9.5 -10.5t-16.5 -17.5t-16 5q-1 1 -5.5 13.5t-9.5 13.5q-8 0 -16 -15q3 8 -11 15t-24 8q19 12 -8 27q-7 4 -20.5 5t-19.5 -4q-5 -7 -5.5 -11.5t5 -8t10.5 -5.5t11.5 -4t8.5 -3 q14 -10 8 -14q-2 -1 -8.5 -3.5t-11.5 -4.5t-6 -4q-3 -4 0 -14t-2 -14q-5 5 -9 17.5t-7 16.5q7 -9 -25 -6l-10 1q-4 0 -16 -2t-20.5 -1t-13.5 8q-4 8 0 20q1 4 4 2q-4 3 -11 9.5t-10 8.5q-46 -15 -94 -41q6 -1 12 1q5 2 13 6.5t10 5.5q34 14 42 7l5 5q14 -16 20 -25 q-7 4 -30 1q-20 -6 -22 -12q7 -12 5 -18q-4 3 -11.5 10t-14.5 11t-15 5q-16 0 -22 -1q-146 -80 -235 -222q7 -7 12 -8q4 -1 5 -9t2.5 -11t11.5 3q9 -8 3 -19q1 1 44 -27q19 -17 21 -21q3 -11 -10 -18q-1 2 -9 9t-9 4q-3 -5 0.5 -18.5t10.5 -12.5q-7 0 -9.5 -16t-2.5 -35.5 t-1 -23.5l2 -1q-3 -12 5.5 -34.5t21.5 -19.5q-13 -3 20 -43q6 -8 8 -9q3 -2 12 -7.5t15 -10t10 -10.5q4 -5 10 -22.5t14 -23.5q-2 -6 9.5 -20t10.5 -23q-1 0 -2.5 -1t-2.5 -1q3 -7 15.5 -14t15.5 -13q1 -3 2 -10t3 -11t8 -2q2 20 -24 62q-15 25 -17 29q-3 5 -5.5 15.5 t-4.5 14.5q2 0 6 -1.5t8.5 -3.5t7.5 -4t2 -3q-3 -7 2 -17.5t12 -18.5t17 -19t12 -13q6 -6 14 -19.5t0 -13.5q9 0 20 -10t17 -20q5 -8 8 -26t5 -24q2 -7 8.5 -13.5t12.5 -9.5l16 -8t13 -7q5 -2 18.5 -10.5t21.5 -11.5q10 -4 16 -4t14.5 2.5t13.5 3.5q15 2 29 -15t21 -21 q36 -19 55 -11q-2 -1 0.5 -7.5t8 -15.5t9 -14.5t5.5 -8.5q5 -6 18 -15t18 -15q6 4 7 9q-3 -8 7 -20t18 -10q14 3 14 32q-31 -15 -49 18q0 1 -2.5 5.5t-4 8.5t-2.5 8.5t0 7.5t5 3q9 0 10 3.5t-2 12.5t-4 13q-1 8 -11 20t-12 15q-5 -9 -16 -8t-16 9q0 -1 -1.5 -5.5t-1.5 -6.5 q-13 0 -15 1q1 3 2.5 17.5t3.5 22.5q1 4 5.5 12t7.5 14.5t4 12.5t-4.5 9.5t-17.5 2.5q-19 -1 -26 -20q-1 -3 -3 -10.5t-5 -11.5t-9 -7q-7 -3 -24 -2t-24 5q-13 8 -22.5 29t-9.5 37q0 10 2.5 26.5t3 25t-5.5 24.5q3 2 9 9.5t10 10.5q2 1 4.5 1.5t4.5 0t4 1.5t3 6q-1 1 -4 3 q-3 3 -4 3q7 -3 28.5 1.5t27.5 -1.5q15 -11 22 2q0 1 -2.5 9.5t-0.5 13.5q5 -27 29 -9q3 -3 15.5 -5t17.5 -5q3 -2 7 -5.5t5.5 -4.5t5 0.5t8.5 6.5q10 -14 12 -24q11 -40 19 -44q7 -3 11 -2t4.5 9.5t0 14t-1.5 12.5l-1 8v18l-1 8q-15 3 -18.5 12t1.5 18.5t15 18.5q1 1 8 3.5 t15.5 6.5t12.5 8q21 19 15 35q7 0 11 9q-1 0 -5 3t-7.5 5t-4.5 2q9 5 2 16q5 3 7.5 11t7.5 10q9 -12 21 -2q7 8 1 16q5 7 20.5 10.5t18.5 9.5q7 -2 8 2t1 12t3 12q4 5 15 9t13 5l17 11q3 4 0 4q18 -2 31 11q10 11 -6 20q3 6 -3 9.5t-15 5.5q3 1 11.5 0.5t10.5 1.5 q15 10 -7 16q-17 5 -43 -12zM879 10q206 36 351 189q-3 3 -12.5 4.5t-12.5 3.5q-18 7 -24 8q1 7 -2.5 13t-8 9t-12.5 8t-11 7q-2 2 -7 6t-7 5.5t-7.5 4.5t-8.5 2t-10 -1l-3 -1q-3 -1 -5.5 -2.5t-5.5 -3t-4 -3t0 -2.5q-21 17 -36 22q-5 1 -11 5.5t-10.5 7t-10 1.5t-11.5 -7 q-5 -5 -6 -15t-2 
-13q-7 5 0 17.5t2 18.5q-3 6 -10.5 4.5t-12 -4.5t-11.5 -8.5t-9 -6.5t-8.5 -5.5t-8.5 -7.5q-3 -4 -6 -12t-5 -11q-2 4 -11.5 6.5t-9.5 5.5q2 -10 4 -35t5 -38q7 -31 -12 -48q-27 -25 -29 -40q-4 -22 12 -26q0 -7 -8 -20.5t-7 -21.5q0 -6 2 -16z" />
+<glyph unicode="&#xf0ad;" horiz-adv-x="1664" d="M384 64q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1028 484l-682 -682q-37 -37 -90 -37q-52 0 -91 37l-106 108q-38 36 -38 90q0 53 38 91l681 681q39 -98 114.5 -173.5t173.5 -114.5zM1662 919q0 -39 -23 -106q-47 -134 -164.5 -217.5 t-258.5 -83.5q-185 0 -316.5 131.5t-131.5 316.5t131.5 316.5t316.5 131.5q58 0 121.5 -16.5t107.5 -46.5q16 -11 16 -28t-16 -28l-293 -169v-224l193 -107q5 3 79 48.5t135.5 81t70.5 35.5q15 0 23.5 -10t8.5 -25z" />
+<glyph unicode="&#xf0ae;" horiz-adv-x="1792" d="M1024 128h640v128h-640v-128zM640 640h1024v128h-1024v-128zM1280 1152h384v128h-384v-128zM1792 320v-256q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v256q0 26 19 45t45 19h1664q26 0 45 -19t19 -45zM1792 832v-256q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19 t-19 45v256q0 26 19 45t45 19h1664q26 0 45 -19t19 -45zM1792 1344v-256q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v256q0 26 19 45t45 19h1664q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf0b0;" horiz-adv-x="1408" d="M1403 1241q17 -41 -14 -70l-493 -493v-742q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-256 256q-19 19 -19 45v486l-493 493q-31 29 -14 70q17 39 59 39h1280q42 0 59 -39z" />
+<glyph unicode="&#xf0b1;" horiz-adv-x="1792" d="M640 1280h512v128h-512v-128zM1792 640v-480q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113v480h672v-160q0 -26 19 -45t45 -19h320q26 0 45 19t19 45v160h672zM1024 640v-128h-256v128h256zM1792 1120v-384h-1792v384q0 66 47 113t113 47h352v160q0 40 28 68 t68 28h576q40 0 68 -28t28 -68v-160h352q66 0 113 -47t47 -113z" />
+<glyph unicode="&#xf0b2;" d="M1283 995l-355 -355l355 -355l144 144q29 31 70 14q39 -17 39 -59v-448q0 -26 -19 -45t-45 -19h-448q-42 0 -59 40q-17 39 14 69l144 144l-355 355l-355 -355l144 -144q31 -30 14 -69q-17 -40 -59 -40h-448q-26 0 -45 19t-19 45v448q0 42 40 59q39 17 69 -14l144 -144 l355 355l-355 355l-144 -144q-19 -19 -45 -19q-12 0 -24 5q-40 17 -40 59v448q0 26 19 45t45 19h448q42 0 59 -40q17 -39 -14 -69l-144 -144l355 -355l355 355l-144 144q-31 30 -14 69q17 40 59 40h448q26 0 45 -19t19 -45v-448q0 -42 -39 -59q-13 -5 -25 -5q-26 0 -45 19z " />
+<glyph unicode="&#xf0c0;" horiz-adv-x="1920" d="M593 640q-162 -5 -265 -128h-134q-82 0 -138 40.5t-56 118.5q0 353 124 353q6 0 43.5 -21t97.5 -42.5t119 -21.5q67 0 133 23q-5 -37 -5 -66q0 -139 81 -256zM1664 3q0 -120 -73 -189.5t-194 -69.5h-874q-121 0 -194 69.5t-73 189.5q0 53 3.5 103.5t14 109t26.5 108.5 t43 97.5t62 81t85.5 53.5t111.5 20q10 0 43 -21.5t73 -48t107 -48t135 -21.5t135 21.5t107 48t73 48t43 21.5q61 0 111.5 -20t85.5 -53.5t62 -81t43 -97.5t26.5 -108.5t14 -109t3.5 -103.5zM640 1280q0 -106 -75 -181t-181 -75t-181 75t-75 181t75 181t181 75t181 -75 t75 -181zM1344 896q0 -159 -112.5 -271.5t-271.5 -112.5t-271.5 112.5t-112.5 271.5t112.5 271.5t271.5 112.5t271.5 -112.5t112.5 -271.5zM1920 671q0 -78 -56 -118.5t-138 -40.5h-134q-103 123 -265 128q81 117 81 256q0 29 -5 66q66 -23 133 -23q59 0 119 21.5t97.5 42.5 t43.5 21q124 0 124 -353zM1792 1280q0 -106 -75 -181t-181 -75t-181 75t-75 181t75 181t181 75t181 -75t75 -181z" />
+<glyph unicode="&#xf0c1;" horiz-adv-x="1664" d="M1456 320q0 40 -28 68l-208 208q-28 28 -68 28q-42 0 -72 -32q3 -3 19 -18.5t21.5 -21.5t15 -19t13 -25.5t3.5 -27.5q0 -40 -28 -68t-68 -28q-15 0 -27.5 3.5t-25.5 13t-19 15t-21.5 21.5t-18.5 19q-33 -31 -33 -73q0 -40 28 -68l206 -207q27 -27 68 -27q40 0 68 26 l147 146q28 28 28 67zM753 1025q0 40 -28 68l-206 207q-28 28 -68 28q-39 0 -68 -27l-147 -146q-28 -28 -28 -67q0 -40 28 -68l208 -208q27 -27 68 -27q42 0 72 31q-3 3 -19 18.5t-21.5 21.5t-15 19t-13 25.5t-3.5 27.5q0 40 28 68t68 28q15 0 27.5 -3.5t25.5 -13t19 -15 t21.5 -21.5t18.5 -19q33 31 33 73zM1648 320q0 -120 -85 -203l-147 -146q-83 -83 -203 -83q-121 0 -204 85l-206 207q-83 83 -83 203q0 123 88 209l-88 88q-86 -88 -208 -88q-120 0 -204 84l-208 208q-84 84 -84 204t85 203l147 146q83 83 203 83q121 0 204 -85l206 -207 q83 -83 83 -203q0 -123 -88 -209l88 -88q86 88 208 88q120 0 204 -84l208 -208q84 -84 84 -204z" />
+<glyph unicode="&#xf0c2;" horiz-adv-x="1920" d="M1920 384q0 -159 -112.5 -271.5t-271.5 -112.5h-1088q-185 0 -316.5 131.5t-131.5 316.5q0 132 71 241.5t187 163.5q-2 28 -2 43q0 212 150 362t362 150q158 0 286.5 -88t187.5 -230q70 62 166 62q106 0 181 -75t75 -181q0 -75 -41 -138q129 -30 213 -134.5t84 -239.5z " />
+<glyph unicode="&#xf0c3;" horiz-adv-x="1664" d="M1527 88q56 -89 21.5 -152.5t-140.5 -63.5h-1152q-106 0 -140.5 63.5t21.5 152.5l503 793v399h-64q-26 0 -45 19t-19 45t19 45t45 19h512q26 0 45 -19t19 -45t-19 -45t-45 -19h-64v-399zM748 813l-272 -429h712l-272 429l-20 31v37v399h-128v-399v-37z" />
+<glyph unicode="&#xf0c4;" horiz-adv-x="1792" d="M960 640q26 0 45 -19t19 -45t-19 -45t-45 -19t-45 19t-19 45t19 45t45 19zM1260 576l507 -398q28 -20 25 -56q-5 -35 -35 -51l-128 -64q-13 -7 -29 -7q-17 0 -31 8l-690 387l-110 -66q-8 -4 -12 -5q14 -49 10 -97q-7 -77 -56 -147.5t-132 -123.5q-132 -84 -277 -84 q-136 0 -222 78q-90 84 -79 207q7 76 56 147t131 124q132 84 278 84q83 0 151 -31q9 13 22 22l122 73l-122 73q-13 9 -22 22q-68 -31 -151 -31q-146 0 -278 84q-82 53 -131 124t-56 147q-5 59 15.5 113t63.5 93q85 79 222 79q145 0 277 -84q83 -52 132 -123t56 -148 q4 -48 -10 -97q4 -1 12 -5l110 -66l690 387q14 8 31 8q16 0 29 -7l128 -64q30 -16 35 -51q3 -36 -25 -56zM579 836q46 42 21 108t-106 117q-92 59 -192 59q-74 0 -113 -36q-46 -42 -21 -108t106 -117q92 -59 192 -59q74 0 113 36zM494 91q81 51 106 117t-21 108 q-39 36 -113 36q-100 0 -192 -59q-81 -51 -106 -117t21 -108q39 -36 113 -36q100 0 192 59zM672 704l96 -58v11q0 36 33 56l14 8l-79 47l-26 -26q-3 -3 -10 -11t-12 -12q-2 -2 -4 -3.5t-3 -2.5zM896 480l96 -32l736 576l-128 64l-768 -431v-113l-160 -96l9 -8q2 -2 7 -6 q4 -4 11 -12t11 -12l26 -26zM1600 64l128 64l-520 408l-177 -138q-2 -3 -13 -7z" />
+<glyph unicode="&#xf0c5;" horiz-adv-x="1792" d="M1696 1152q40 0 68 -28t28 -68v-1216q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v288h-544q-40 0 -68 28t-28 68v672q0 40 20 88t48 76l408 408q28 28 76 48t88 20h416q40 0 68 -28t28 -68v-328q68 40 128 40h416zM1152 939l-299 -299h299v299zM512 1323l-299 -299 h299v299zM708 676l316 316v416h-384v-416q0 -40 -28 -68t-68 -28h-416v-640h512v256q0 40 20 88t48 76zM1664 -128v1152h-384v-416q0 -40 -28 -68t-68 -28h-416v-640h896z" />
+<glyph unicode="&#xf0c6;" horiz-adv-x="1408" d="M1404 151q0 -117 -79 -196t-196 -79q-135 0 -235 100l-777 776q-113 115 -113 271q0 159 110 270t269 111q158 0 273 -113l605 -606q10 -10 10 -22q0 -16 -30.5 -46.5t-46.5 -30.5q-13 0 -23 10l-606 607q-79 77 -181 77q-106 0 -179 -75t-73 -181q0 -105 76 -181 l776 -777q63 -63 145 -63q64 0 106 42t42 106q0 82 -63 145l-581 581q-26 24 -60 24q-29 0 -48 -19t-19 -48q0 -32 25 -59l410 -410q10 -10 10 -22q0 -16 -31 -47t-47 -31q-12 0 -22 10l-410 410q-63 61 -63 149q0 82 57 139t139 57q88 0 149 -63l581 -581q100 -98 100 -235 z" />
+<glyph unicode="&#xf0c7;" d="M384 0h768v384h-768v-384zM1280 0h128v896q0 14 -10 38.5t-20 34.5l-281 281q-10 10 -34 20t-39 10v-416q0 -40 -28 -68t-68 -28h-576q-40 0 -68 28t-28 68v416h-128v-1280h128v416q0 40 28 68t68 28h832q40 0 68 -28t28 -68v-416zM896 928v320q0 13 -9.5 22.5t-22.5 9.5 h-192q-13 0 -22.5 -9.5t-9.5 -22.5v-320q0 -13 9.5 -22.5t22.5 -9.5h192q13 0 22.5 9.5t9.5 22.5zM1536 896v-928q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68v1344q0 40 28 68t68 28h928q40 0 88 -20t76 -48l280 -280q28 -28 48 -76t20 -88z" />
+<glyph unicode="&#xf0c8;" d="M1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf0c9;" d="M1536 192v-128q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45zM1536 704v-128q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45zM1536 1216v-128q0 -26 -19 -45 t-45 -19h-1408q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf0ca;" horiz-adv-x="1792" d="M384 128q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM384 640q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM1792 224v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5 t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5zM384 1152q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM1792 736v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5z M1792 1248v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5z" />
+<glyph unicode="&#xf0cb;" horiz-adv-x="1792" d="M381 -84q0 -80 -54.5 -126t-135.5 -46q-106 0 -172 66l57 88q49 -45 106 -45q29 0 50.5 14.5t21.5 42.5q0 64 -105 56l-26 56q8 10 32.5 43.5t42.5 54t37 38.5v1q-16 0 -48.5 -1t-48.5 -1v-53h-106v152h333v-88l-95 -115q51 -12 81 -49t30 -88zM383 543v-159h-362 q-6 36 -6 54q0 51 23.5 93t56.5 68t66 47.5t56.5 43.5t23.5 45q0 25 -14.5 38.5t-39.5 13.5q-46 0 -81 -58l-85 59q24 51 71.5 79.5t105.5 28.5q73 0 123 -41.5t50 -112.5q0 -50 -34 -91.5t-75 -64.5t-75.5 -50.5t-35.5 -52.5h127v60h105zM1792 224v-192q0 -13 -9.5 -22.5 t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5v192q0 14 9 23t23 9h1216q13 0 22.5 -9.5t9.5 -22.5zM384 1123v-99h-335v99h107q0 41 0.5 122t0.5 121v12h-2q-8 -17 -50 -54l-71 76l136 127h106v-404h108zM1792 736v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5 t-9.5 22.5v192q0 14 9 23t23 9h1216q13 0 22.5 -9.5t9.5 -22.5zM1792 1248v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5z" />
+<glyph unicode="&#xf0cc;" horiz-adv-x="1792" d="M1760 640q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-1728q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h1728zM483 704q-28 35 -51 80q-48 97 -48 188q0 181 134 309q133 127 393 127q50 0 167 -19q66 -12 177 -48q10 -38 21 -118q14 -123 14 -183q0 -18 -5 -45l-12 -3l-84 6 l-14 2q-50 149 -103 205q-88 91 -210 91q-114 0 -182 -59q-67 -58 -67 -146q0 -73 66 -140t279 -129q69 -20 173 -66q58 -28 95 -52h-743zM990 448h411q7 -39 7 -92q0 -111 -41 -212q-23 -55 -71 -104q-37 -35 -109 -81q-80 -48 -153 -66q-80 -21 -203 -21q-114 0 -195 23 l-140 40q-57 16 -72 28q-8 8 -8 22v13q0 108 -2 156q-1 30 0 68l2 37v44l102 2q15 -34 30 -71t22.5 -56t12.5 -27q35 -57 80 -94q43 -36 105 -57q59 -22 132 -22q64 0 139 27q77 26 122 86q47 61 47 129q0 84 -81 157q-34 29 -137 71z" />
+<glyph unicode="&#xf0cd;" d="M48 1313q-37 2 -45 4l-3 88q13 1 40 1q60 0 112 -4q132 -7 166 -7q86 0 168 3q116 4 146 5q56 0 86 2l-1 -14l2 -64v-9q-60 -9 -124 -9q-60 0 -79 -25q-13 -14 -13 -132q0 -13 0.5 -32.5t0.5 -25.5l1 -229l14 -280q6 -124 51 -202q35 -59 96 -92q88 -47 177 -47 q104 0 191 28q56 18 99 51q48 36 65 64q36 56 53 114q21 73 21 229q0 79 -3.5 128t-11 122.5t-13.5 159.5l-4 59q-5 67 -24 88q-34 35 -77 34l-100 -2l-14 3l2 86h84l205 -10q76 -3 196 10l18 -2q6 -38 6 -51q0 -7 -4 -31q-45 -12 -84 -13q-73 -11 -79 -17q-15 -15 -15 -41 q0 -7 1.5 -27t1.5 -31q8 -19 22 -396q6 -195 -15 -304q-15 -76 -41 -122q-38 -65 -112 -123q-75 -57 -182 -89q-109 -33 -255 -33q-167 0 -284 46q-119 47 -179 122q-61 76 -83 195q-16 80 -16 237v333q0 188 -17 213q-25 36 -147 39zM1536 -96v64q0 14 -9 23t-23 9h-1472 q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h1472q14 0 23 9t9 23z" />
+<glyph unicode="&#xf0ce;" horiz-adv-x="1664" d="M512 160v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM512 544v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM1024 160v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23 v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM512 928v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM1024 544v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM1536 160v192 q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM1024 928v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM1536 544v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192 q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM1536 928v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM1664 1248v-1088q0 -66 -47 -113t-113 -47h-1344q-66 0 -113 47t-47 113v1088q0 66 47 113t113 47h1344q66 0 113 -47t47 -113 z" />
+<glyph unicode="&#xf0d0;" horiz-adv-x="1664" d="M1190 955l293 293l-107 107l-293 -293zM1637 1248q0 -27 -18 -45l-1286 -1286q-18 -18 -45 -18t-45 18l-198 198q-18 18 -18 45t18 45l1286 1286q18 18 45 18t45 -18l198 -198q18 -18 18 -45zM286 1438l98 -30l-98 -30l-30 -98l-30 98l-98 30l98 30l30 98zM636 1276 l196 -60l-196 -60l-60 -196l-60 196l-196 60l196 60l60 196zM1566 798l98 -30l-98 -30l-30 -98l-30 98l-98 30l98 30l30 98zM926 1438l98 -30l-98 -30l-30 -98l-30 98l-98 30l98 30l30 98z" />
+<glyph unicode="&#xf0d1;" horiz-adv-x="1792" d="M640 128q0 52 -38 90t-90 38t-90 -38t-38 -90t38 -90t90 -38t90 38t38 90zM256 640h384v256h-158q-13 0 -22 -9l-195 -195q-9 -9 -9 -22v-30zM1536 128q0 52 -38 90t-90 38t-90 -38t-38 -90t38 -90t90 -38t90 38t38 90zM1792 1216v-1024q0 -15 -4 -26.5t-13.5 -18.5 t-16.5 -11.5t-23.5 -6t-22.5 -2t-25.5 0t-22.5 0.5q0 -106 -75 -181t-181 -75t-181 75t-75 181h-384q0 -106 -75 -181t-181 -75t-181 75t-75 181h-64q-3 0 -22.5 -0.5t-25.5 0t-22.5 2t-23.5 6t-16.5 11.5t-13.5 18.5t-4 26.5q0 26 19 45t45 19v320q0 8 -0.5 35t0 38 t2.5 34.5t6.5 37t14 30.5t22.5 30l198 198q19 19 50.5 32t58.5 13h160v192q0 26 19 45t45 19h1024q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf0d2;" d="M1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103q-111 0 -218 32q59 93 78 164q9 34 54 211q20 -39 73 -67.5t114 -28.5q121 0 216 68.5t147 188.5t52 270q0 114 -59.5 214t-172.5 163t-255 63q-105 0 -196 -29t-154.5 -77t-109 -110.5t-67 -129.5t-21.5 -134 q0 -104 40 -183t117 -111q30 -12 38 20q2 7 8 31t8 30q6 23 -11 43q-51 61 -51 151q0 151 104.5 259.5t273.5 108.5q151 0 235.5 -82t84.5 -213q0 -170 -68.5 -289t-175.5 -119q-61 0 -98 43.5t-23 104.5q8 35 26.5 93.5t30 103t11.5 75.5q0 50 -27 83t-77 33 q-62 0 -105 -57t-43 -142q0 -73 25 -122l-99 -418q-17 -70 -13 -177q-206 91 -333 281t-127 423q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf0d3;" d="M1248 1408q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-725q85 122 108 210q9 34 53 209q21 -39 73.5 -67t112.5 -28q181 0 295.5 147.5t114.5 373.5q0 84 -35 162.5t-96.5 139t-152.5 97t-197 36.5q-104 0 -194.5 -28.5t-153 -76.5 t-107.5 -109.5t-66.5 -128t-21.5 -132.5q0 -102 39.5 -180t116.5 -110q13 -5 23.5 0t14.5 19q10 44 15 61q6 23 -11 42q-50 62 -50 150q0 150 103.5 256.5t270.5 106.5q149 0 232.5 -81t83.5 -210q0 -168 -67.5 -286t-173.5 -118q-60 0 -97 43.5t-23 103.5q8 34 26.5 92.5 t29.5 102t11 74.5q0 49 -26.5 81.5t-75.5 32.5q-61 0 -103.5 -56.5t-42.5 -139.5q0 -72 24 -121l-98 -414q-24 -100 -7 -254h-183q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960z" />
+<glyph unicode="&#xf0d4;" d="M678 -57q0 -38 -10 -71h-380q-95 0 -171.5 56.5t-103.5 147.5q24 45 69 77.5t100 49.5t107 24t107 7q32 0 49 -2q6 -4 30.5 -21t33 -23t31 -23t32 -25.5t27.5 -25.5t26.5 -29.5t21 -30.5t17.5 -34.5t9.5 -36t4.5 -40.5zM385 294q-234 -7 -385 -85v433q103 -118 273 -118 q32 0 70 5q-21 -61 -21 -86q0 -67 63 -149zM558 805q0 -100 -43.5 -160.5t-140.5 -60.5q-51 0 -97 26t-78 67.5t-56 93.5t-35.5 104t-11.5 99q0 96 51.5 165t144.5 69q66 0 119 -41t84 -104t47 -130t16 -128zM1536 896v-736q0 -119 -84.5 -203.5t-203.5 -84.5h-468 q39 73 39 157q0 66 -22 122.5t-55.5 93t-72 71t-72 59.5t-55.5 54.5t-22 59.5q0 36 23 68t56 61.5t65.5 64.5t55.5 93t23 131t-26.5 145.5t-75.5 118.5q-6 6 -14 11t-12.5 7.5t-10 9.5t-10.5 17h135l135 64h-437q-138 0 -244.5 -38.5t-182.5 -133.5q0 126 81 213t207 87h960 q119 0 203.5 -84.5t84.5 -203.5v-96h-256v256h-128v-256h-256v-128h256v-256h128v256h256z" />
+<glyph unicode="&#xf0d5;" horiz-adv-x="1664" d="M876 71q0 21 -4.5 40.5t-9.5 36t-17.5 34.5t-21 30.5t-26.5 29.5t-27.5 25.5t-32 25.5t-31 23t-33 23t-30.5 21q-17 2 -50 2q-54 0 -106 -7t-108 -25t-98 -46t-69 -75t-27 -107q0 -68 35.5 -121.5t93 -84t120.5 -45.5t127 -15q59 0 112.5 12.5t100.5 39t74.5 73.5 t27.5 110zM756 933q0 60 -16.5 127.5t-47 130.5t-84 104t-119.5 41q-93 0 -144 -69t-51 -165q0 -47 11.5 -99t35.5 -104t56 -93.5t78 -67.5t97 -26q97 0 140.5 60.5t43.5 160.5zM625 1408h437l-135 -79h-135q71 -45 110 -126t39 -169q0 -74 -23 -131.5t-56 -92.5t-66 -64.5 t-56 -61t-23 -67.5q0 -26 16.5 -51t43 -48t58.5 -48t64 -55.5t58.5 -66t43 -85t16.5 -106.5q0 -160 -140 -282q-152 -131 -420 -131q-59 0 -119.5 10t-122 33.5t-108.5 58t-77 89t-30 121.5q0 61 37 135q32 64 96 110.5t145 71t155 36t150 13.5q-64 83 -64 149q0 12 2 23.5 t5 19.5t8 21.5t7 21.5q-40 -5 -70 -5q-149 0 -255.5 98t-106.5 246q0 140 95 250.5t234 141.5q94 20 187 20zM1664 1152v-128h-256v-256h-128v256h-256v128h256v256h128v-256h256z" />
+<glyph unicode="&#xf0d6;" horiz-adv-x="1920" d="M768 384h384v96h-128v448h-114l-148 -137l77 -80q42 37 55 57h2v-288h-128v-96zM1280 640q0 -70 -21 -142t-59.5 -134t-101.5 -101t-138 -39t-138 39t-101.5 101t-59.5 134t-21 142t21 142t59.5 134t101.5 101t138 39t138 -39t101.5 -101t59.5 -134t21 -142zM1792 384 v512q-106 0 -181 75t-75 181h-1152q0 -106 -75 -181t-181 -75v-512q106 0 181 -75t75 -181h1152q0 106 75 181t181 75zM1920 1216v-1152q0 -26 -19 -45t-45 -19h-1792q-26 0 -45 19t-19 45v1152q0 26 19 45t45 19h1792q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf0d7;" horiz-adv-x="1024" d="M1024 832q0 -26 -19 -45l-448 -448q-19 -19 -45 -19t-45 19l-448 448q-19 19 -19 45t19 45t45 19h896q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf0d8;" horiz-adv-x="1024" d="M1024 320q0 -26 -19 -45t-45 -19h-896q-26 0 -45 19t-19 45t19 45l448 448q19 19 45 19t45 -19l448 -448q19 -19 19 -45z" />
+<glyph unicode="&#xf0d9;" horiz-adv-x="640" d="M640 1088v-896q0 -26 -19 -45t-45 -19t-45 19l-448 448q-19 19 -19 45t19 45l448 448q19 19 45 19t45 -19t19 -45z" />
+<glyph unicode="&#xf0da;" horiz-adv-x="640" d="M576 640q0 -26 -19 -45l-448 -448q-19 -19 -45 -19t-45 19t-19 45v896q0 26 19 45t45 19t45 -19l448 -448q19 -19 19 -45z" />
+<glyph unicode="&#xf0db;" horiz-adv-x="1664" d="M160 0h608v1152h-640v-1120q0 -13 9.5 -22.5t22.5 -9.5zM1536 32v1120h-640v-1152h608q13 0 22.5 9.5t9.5 22.5zM1664 1248v-1216q0 -66 -47 -113t-113 -47h-1344q-66 0 -113 47t-47 113v1216q0 66 47 113t113 47h1344q66 0 113 -47t47 -113z" />
+<glyph unicode="&#xf0dc;" horiz-adv-x="1024" d="M1024 448q0 -26 -19 -45l-448 -448q-19 -19 -45 -19t-45 19l-448 448q-19 19 -19 45t19 45t45 19h896q26 0 45 -19t19 -45zM1024 832q0 -26 -19 -45t-45 -19h-896q-26 0 -45 19t-19 45t19 45l448 448q19 19 45 19t45 -19l448 -448q19 -19 19 -45z" />
+<glyph unicode="&#xf0dd;" horiz-adv-x="1024" d="M1024 448q0 -26 -19 -45l-448 -448q-19 -19 -45 -19t-45 19l-448 448q-19 19 -19 45t19 45t45 19h896q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf0de;" horiz-adv-x="1024" d="M1024 832q0 -26 -19 -45t-45 -19h-896q-26 0 -45 19t-19 45t19 45l448 448q19 19 45 19t45 -19l448 -448q19 -19 19 -45z" />
+<glyph unicode="&#xf0e0;" horiz-adv-x="1792" d="M1792 826v-794q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113v794q44 -49 101 -87q362 -246 497 -345q57 -42 92.5 -65.5t94.5 -48t110 -24.5h1h1q51 0 110 24.5t94.5 48t92.5 65.5q170 123 498 345q57 39 100 87zM1792 1120q0 -79 -49 -151t-122 -123 q-376 -261 -468 -325q-10 -7 -42.5 -30.5t-54 -38t-52 -32.5t-57.5 -27t-50 -9h-1h-1q-23 0 -50 9t-57.5 27t-52 32.5t-54 38t-42.5 30.5q-91 64 -262 182.5t-205 142.5q-62 42 -117 115.5t-55 136.5q0 78 41.5 130t118.5 52h1472q65 0 112.5 -47t47.5 -113z" />
+<glyph unicode="&#xf0e1;" d="M349 911v-991h-330v991h330zM370 1217q1 -73 -50.5 -122t-135.5 -49h-2q-82 0 -132 49t-50 122q0 74 51.5 122.5t134.5 48.5t133 -48.5t51 -122.5zM1536 488v-568h-329v530q0 105 -40.5 164.5t-126.5 59.5q-63 0 -105.5 -34.5t-63.5 -85.5q-11 -30 -11 -81v-553h-329 q2 399 2 647t-1 296l-1 48h329v-144h-2q20 32 41 56t56.5 52t87 43.5t114.5 15.5q171 0 275 -113.5t104 -332.5z" />
+<glyph unicode="&#xf0e2;" d="M1536 640q0 -156 -61 -298t-164 -245t-245 -164t-298 -61q-172 0 -327 72.5t-264 204.5q-7 10 -6.5 22.5t8.5 20.5l137 138q10 9 25 9q16 -2 23 -12q73 -95 179 -147t225 -52q104 0 198.5 40.5t163.5 109.5t109.5 163.5t40.5 198.5t-40.5 198.5t-109.5 163.5 t-163.5 109.5t-198.5 40.5q-98 0 -188 -35.5t-160 -101.5l137 -138q31 -30 14 -69q-17 -40 -59 -40h-448q-26 0 -45 19t-19 45v448q0 42 40 59q39 17 69 -14l130 -129q107 101 244.5 156.5t284.5 55.5q156 0 298 -61t245 -164t164 -245t61 -298z" />
+<glyph unicode="&#xf0e3;" horiz-adv-x="1792" d="M1771 0q0 -53 -37 -90l-107 -108q-39 -37 -91 -37q-53 0 -90 37l-363 364q-38 36 -38 90q0 53 43 96l-256 256l-126 -126q-14 -14 -34 -14t-34 14q2 -2 12.5 -12t12.5 -13t10 -11.5t10 -13.5t6 -13.5t5.5 -16.5t1.5 -18q0 -38 -28 -68q-3 -3 -16.5 -18t-19 -20.5 t-18.5 -16.5t-22 -15.5t-22 -9t-26 -4.5q-40 0 -68 28l-408 408q-28 28 -28 68q0 13 4.5 26t9 22t15.5 22t16.5 18.5t20.5 19t18 16.5q30 28 68 28q10 0 18 -1.5t16.5 -5.5t13.5 -6t13.5 -10t11.5 -10t13 -12.5t12 -12.5q-14 14 -14 34t14 34l348 348q14 14 34 14t34 -14 q-2 2 -12.5 12t-12.5 13t-10 11.5t-10 13.5t-6 13.5t-5.5 16.5t-1.5 18q0 38 28 68q3 3 16.5 18t19 20.5t18.5 16.5t22 15.5t22 9t26 4.5q40 0 68 -28l408 -408q28 -28 28 -68q0 -13 -4.5 -26t-9 -22t-15.5 -22t-16.5 -18.5t-20.5 -19t-18 -16.5q-30 -28 -68 -28 q-10 0 -18 1.5t-16.5 5.5t-13.5 6t-13.5 10t-11.5 10t-13 12.5t-12 12.5q14 -14 14 -34t-14 -34l-126 -126l256 -256q43 43 96 43q52 0 91 -37l363 -363q37 -39 37 -91z" />
+<glyph unicode="&#xf0e4;" horiz-adv-x="1792" d="M384 384q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM576 832q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1004 351l101 382q6 26 -7.5 48.5t-38.5 29.5 t-48 -6.5t-30 -39.5l-101 -382q-60 -5 -107 -43.5t-63 -98.5q-20 -77 20 -146t117 -89t146 20t89 117q16 60 -6 117t-72 91zM1664 384q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1024 1024q0 53 -37.5 90.5 t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1472 832q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1792 384q0 -261 -141 -483q-19 -29 -54 -29h-1402q-35 0 -54 29 q-141 221 -141 483q0 182 71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348z" />
+<glyph unicode="&#xf0e5;" horiz-adv-x="1792" d="M896 1152q-204 0 -381.5 -69.5t-282 -187.5t-104.5 -255q0 -112 71.5 -213.5t201.5 -175.5l87 -50l-27 -96q-24 -91 -70 -172q152 63 275 171l43 38l57 -6q69 -8 130 -8q204 0 381.5 69.5t282 187.5t104.5 255t-104.5 255t-282 187.5t-381.5 69.5zM1792 640 q0 -174 -120 -321.5t-326 -233t-450 -85.5q-70 0 -145 8q-198 -175 -460 -242q-49 -14 -114 -22h-5q-15 0 -27 10.5t-16 27.5v1q-3 4 -0.5 12t2 10t4.5 9.5l6 9t7 8.5t8 9q7 8 31 34.5t34.5 38t31 39.5t32.5 51t27 59t26 76q-157 89 -247.5 220t-90.5 281q0 174 120 321.5 t326 233t450 85.5t450 -85.5t326 -233t120 -321.5z" />
+<glyph unicode="&#xf0e6;" horiz-adv-x="1792" d="M704 1152q-153 0 -286 -52t-211.5 -141t-78.5 -191q0 -82 53 -158t149 -132l97 -56l-35 -84q34 20 62 39l44 31l53 -10q78 -14 153 -14q153 0 286 52t211.5 141t78.5 191t-78.5 191t-211.5 141t-286 52zM704 1280q191 0 353.5 -68.5t256.5 -186.5t94 -257t-94 -257 t-256.5 -186.5t-353.5 -68.5q-86 0 -176 16q-124 -88 -278 -128q-36 -9 -86 -16h-3q-11 0 -20.5 8t-11.5 21q-1 3 -1 6.5t0.5 6.5t2 6l2.5 5t3.5 5.5t4 5t4.5 5t4 4.5q5 6 23 25t26 29.5t22.5 29t25 38.5t20.5 44q-124 72 -195 177t-71 224q0 139 94 257t256.5 186.5 t353.5 68.5zM1526 111q10 -24 20.5 -44t25 -38.5t22.5 -29t26 -29.5t23 -25q1 -1 4 -4.5t4.5 -5t4 -5t3.5 -5.5l2.5 -5t2 -6t0.5 -6.5t-1 -6.5q-3 -14 -13 -22t-22 -7q-50 7 -86 16q-154 40 -278 128q-90 -16 -176 -16q-271 0 -472 132q58 -4 88 -4q161 0 309 45t264 129 q125 92 192 212t67 254q0 77 -23 152q129 -71 204 -178t75 -230q0 -120 -71 -224.5t-195 -176.5z" />
+<glyph unicode="&#xf0e7;" horiz-adv-x="896" d="M885 970q18 -20 7 -44l-540 -1157q-13 -25 -42 -25q-4 0 -14 2q-17 5 -25.5 19t-4.5 30l197 808l-406 -101q-4 -1 -12 -1q-18 0 -31 11q-18 15 -13 39l201 825q4 14 16 23t28 9h328q19 0 32 -12.5t13 -29.5q0 -8 -5 -18l-171 -463l396 98q8 2 12 2q19 0 34 -15z" />
+<glyph unicode="&#xf0e8;" horiz-adv-x="1792" d="M1792 288v-320q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h96v192h-512v-192h96q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h96v192h-512v-192h96q40 0 68 -28t28 -68v-320 q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h96v192q0 52 38 90t90 38h512v192h-96q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-96v-192h512q52 0 90 -38t38 -90v-192h96q40 0 68 -28t28 -68 z" />
+<glyph unicode="&#xf0e9;" horiz-adv-x="1664" d="M896 708v-580q0 -104 -76 -180t-180 -76t-180 76t-76 180q0 26 19 45t45 19t45 -19t19 -45q0 -50 39 -89t89 -39t89 39t39 89v580q33 11 64 11t64 -11zM1664 681q0 -13 -9.5 -22.5t-22.5 -9.5q-11 0 -23 10q-49 46 -93 69t-102 23q-68 0 -128 -37t-103 -97 q-7 -10 -17.5 -28t-14.5 -24q-11 -17 -28 -17q-18 0 -29 17q-4 6 -14.5 24t-17.5 28q-43 60 -102.5 97t-127.5 37t-127.5 -37t-102.5 -97q-7 -10 -17.5 -28t-14.5 -24q-11 -17 -29 -17q-17 0 -28 17q-4 6 -14.5 24t-17.5 28q-43 60 -103 97t-128 37q-58 0 -102 -23t-93 -69 q-12 -10 -23 -10q-13 0 -22.5 9.5t-9.5 22.5q0 5 1 7q45 183 172.5 319.5t298 204.5t360.5 68q140 0 274.5 -40t246.5 -113.5t194.5 -187t115.5 -251.5q1 -2 1 -7zM896 1408v-98q-42 2 -64 2t-64 -2v98q0 26 19 45t45 19t45 -19t19 -45z" />
+<glyph unicode="&#xf0ea;" horiz-adv-x="1792" d="M768 -128h896v640h-416q-40 0 -68 28t-28 68v416h-384v-1152zM1024 1312v64q0 13 -9.5 22.5t-22.5 9.5h-704q-13 0 -22.5 -9.5t-9.5 -22.5v-64q0 -13 9.5 -22.5t22.5 -9.5h704q13 0 22.5 9.5t9.5 22.5zM1280 640h299l-299 299v-299zM1792 512v-672q0 -40 -28 -68t-68 -28 h-960q-40 0 -68 28t-28 68v160h-544q-40 0 -68 28t-28 68v1344q0 40 28 68t68 28h1088q40 0 68 -28t28 -68v-328q21 -13 36 -28l408 -408q28 -28 48 -76t20 -88z" />
+<glyph unicode="&#xf0eb;" horiz-adv-x="1024" d="M736 960q0 -13 -9.5 -22.5t-22.5 -9.5t-22.5 9.5t-9.5 22.5q0 46 -54 71t-106 25q-13 0 -22.5 9.5t-9.5 22.5t9.5 22.5t22.5 9.5q50 0 99.5 -16t87 -54t37.5 -90zM896 960q0 72 -34.5 134t-90 101.5t-123 62t-136.5 22.5t-136.5 -22.5t-123 -62t-90 -101.5t-34.5 -134 q0 -101 68 -180q10 -11 30.5 -33t30.5 -33q128 -153 141 -298h228q13 145 141 298q10 11 30.5 33t30.5 33q68 79 68 180zM1024 960q0 -155 -103 -268q-45 -49 -74.5 -87t-59.5 -95.5t-34 -107.5q47 -28 47 -82q0 -37 -25 -64q25 -27 25 -64q0 -52 -45 -81q13 -23 13 -47 q0 -46 -31.5 -71t-77.5 -25q-20 -44 -60 -70t-87 -26t-87 26t-60 70q-46 0 -77.5 25t-31.5 71q0 24 13 47q-45 29 -45 81q0 37 25 64q-25 27 -25 64q0 54 47 82q-4 50 -34 107.5t-59.5 95.5t-74.5 87q-103 113 -103 268q0 99 44.5 184.5t117 142t164 89t186.5 32.5 t186.5 -32.5t164 -89t117 -142t44.5 -184.5z" />
+<glyph unicode="&#xf0ec;" horiz-adv-x="1792" d="M1792 352v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1376v-192q0 -13 -9.5 -22.5t-22.5 -9.5q-12 0 -24 10l-319 320q-9 9 -9 22q0 14 9 23l320 320q9 9 23 9q13 0 22.5 -9.5t9.5 -22.5v-192h1376q13 0 22.5 -9.5t9.5 -22.5zM1792 896q0 -14 -9 -23l-320 -320q-9 -9 -23 -9 q-13 0 -22.5 9.5t-9.5 22.5v192h-1376q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1376v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23z" />
+<glyph unicode="&#xf0ed;" horiz-adv-x="1920" d="M1280 608q0 14 -9 23t-23 9h-224v352q0 13 -9.5 22.5t-22.5 9.5h-192q-13 0 -22.5 -9.5t-9.5 -22.5v-352h-224q-13 0 -22.5 -9.5t-9.5 -22.5q0 -14 9 -23l352 -352q9 -9 23 -9t23 9l351 351q10 12 10 24zM1920 384q0 -159 -112.5 -271.5t-271.5 -112.5h-1088 q-185 0 -316.5 131.5t-131.5 316.5q0 130 70 240t188 165q-2 30 -2 43q0 212 150 362t362 150q156 0 285.5 -87t188.5 -231q71 62 166 62q106 0 181 -75t75 -181q0 -76 -41 -138q130 -31 213.5 -135.5t83.5 -238.5z" />
+<glyph unicode="&#xf0ee;" horiz-adv-x="1920" d="M1280 672q0 14 -9 23l-352 352q-9 9 -23 9t-23 -9l-351 -351q-10 -12 -10 -24q0 -14 9 -23t23 -9h224v-352q0 -13 9.5 -22.5t22.5 -9.5h192q13 0 22.5 9.5t9.5 22.5v352h224q13 0 22.5 9.5t9.5 22.5zM1920 384q0 -159 -112.5 -271.5t-271.5 -112.5h-1088 q-185 0 -316.5 131.5t-131.5 316.5q0 130 70 240t188 165q-2 30 -2 43q0 212 150 362t362 150q156 0 285.5 -87t188.5 -231q71 62 166 62q106 0 181 -75t75 -181q0 -76 -41 -138q130 -31 213.5 -135.5t83.5 -238.5z" />
+<glyph unicode="&#xf0f0;" horiz-adv-x="1408" d="M384 192q0 -26 -19 -45t-45 -19t-45 19t-19 45t19 45t45 19t45 -19t19 -45zM1408 131q0 -121 -73 -190t-194 -69h-874q-121 0 -194 69t-73 190q0 68 5.5 131t24 138t47.5 132.5t81 103t120 60.5q-22 -52 -22 -120v-203q-58 -20 -93 -70t-35 -111q0 -80 56 -136t136 -56 t136 56t56 136q0 61 -35.5 111t-92.5 70v203q0 62 25 93q132 -104 295 -104t295 104q25 -31 25 -93v-64q-106 0 -181 -75t-75 -181v-89q-32 -29 -32 -71q0 -40 28 -68t68 -28t68 28t28 68q0 42 -32 71v89q0 52 38 90t90 38t90 -38t38 -90v-89q-32 -29 -32 -71q0 -40 28 -68 t68 -28t68 28t28 68q0 42 -32 71v89q0 68 -34.5 127.5t-93.5 93.5q0 10 0.5 42.5t0 48t-2.5 41.5t-7 47t-13 40q68 -15 120 -60.5t81 -103t47.5 -132.5t24 -138t5.5 -131zM1088 1024q0 -159 -112.5 -271.5t-271.5 -112.5t-271.5 112.5t-112.5 271.5t112.5 271.5t271.5 112.5 t271.5 -112.5t112.5 -271.5z" />
+<glyph unicode="&#xf0f1;" horiz-adv-x="1408" d="M1280 832q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1408 832q0 -62 -35.5 -111t-92.5 -70v-395q0 -159 -131.5 -271.5t-316.5 -112.5t-316.5 112.5t-131.5 271.5v132q-164 20 -274 128t-110 252v512q0 26 19 45t45 19q6 0 16 -2q17 30 47 48 t65 18q53 0 90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5q-33 0 -64 18v-402q0 -106 94 -181t226 -75t226 75t94 181v402q-31 -18 -64 -18q-53 0 -90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5q35 0 65 -18t47 -48q10 2 16 2q26 0 45 -19t19 -45v-512q0 -144 -110 -252 t-274 -128v-132q0 -106 94 -181t226 -75t226 75t94 181v395q-57 21 -92.5 70t-35.5 111q0 80 56 136t136 56t136 -56t56 -136z" />
+<glyph unicode="&#xf0f2;" horiz-adv-x="1792" d="M640 1152h512v128h-512v-128zM288 1152v-1280h-64q-92 0 -158 66t-66 158v832q0 92 66 158t158 66h64zM1408 1152v-1280h-1024v1280h128v160q0 40 28 68t68 28h576q40 0 68 -28t28 -68v-160h128zM1792 928v-832q0 -92 -66 -158t-158 -66h-64v1280h64q92 0 158 -66 t66 -158z" />
+<glyph unicode="&#xf0f3;" horiz-adv-x="1664" d="M848 -160q0 16 -16 16q-59 0 -101.5 42.5t-42.5 101.5q0 16 -16 16t-16 -16q0 -73 51.5 -124.5t124.5 -51.5q16 0 16 16zM1664 128q0 -52 -38 -90t-90 -38h-448q0 -106 -75 -181t-181 -75t-181 75t-75 181h-448q-52 0 -90 38t-38 90q190 161 287 397.5t97 498.5 q0 165 96 262t264 117q-8 18 -8 37q0 40 28 68t68 28t68 -28t28 -68q0 -19 -8 -37q168 -20 264 -117t96 -262q0 -262 97 -498.5t287 -397.5z" />
+<glyph unicode="&#xf0f4;" horiz-adv-x="1920" d="M1664 896q0 80 -56 136t-136 56h-64v-384h64q80 0 136 56t56 136zM0 128h1792q0 -106 -75 -181t-181 -75h-1280q-106 0 -181 75t-75 181zM1856 896q0 -159 -112.5 -271.5t-271.5 -112.5h-64v-32q0 -92 -66 -158t-158 -66h-704q-92 0 -158 66t-66 158v736q0 26 19 45 t45 19h1152q159 0 271.5 -112.5t112.5 -271.5z" />
+<glyph unicode="&#xf0f5;" horiz-adv-x="1408" d="M640 1472v-640q0 -61 -35.5 -111t-92.5 -70v-779q0 -52 -38 -90t-90 -38h-128q-52 0 -90 38t-38 90v779q-57 20 -92.5 70t-35.5 111v640q0 26 19 45t45 19t45 -19t19 -45v-416q0 -26 19 -45t45 -19t45 19t19 45v416q0 26 19 45t45 19t45 -19t19 -45v-416q0 -26 19 -45 t45 -19t45 19t19 45v416q0 26 19 45t45 19t45 -19t19 -45zM1408 1472v-1600q0 -52 -38 -90t-90 -38h-128q-52 0 -90 38t-38 90v512h-224q-13 0 -22.5 9.5t-9.5 22.5v800q0 132 94 226t226 94h256q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf0f6;" horiz-adv-x="1280" d="M1024 352v-64q0 -14 -9 -23t-23 -9h-704q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h704q14 0 23 -9t9 -23zM1024 608v-64q0 -14 -9 -23t-23 -9h-704q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h704q14 0 23 -9t9 -23zM128 0h1024v768h-416q-40 0 -68 28t-28 68v416h-512v-1280z M768 896h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376zM1280 864v-896q0 -40 -28 -68t-68 -28h-1088q-40 0 -68 28t-28 68v1344q0 40 28 68t68 28h640q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88z" />
+<glyph unicode="&#xf0f7;" horiz-adv-x="1408" d="M384 224v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM384 480v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z M640 480v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM384 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z M1152 224v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM896 480v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z M640 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM384 992v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z M1152 480v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM896 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z M640 992v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM384 1248v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z M1152 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM896 992v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z M640 1248v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM1152 992v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z M896 1248v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM1152 1248v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z M896 -128h384v1536h-1152v-1536h384v224q0 13 9.5 22.5t22.5 9.5h320q13 0 22.5 -9.5t9.5 -22.5v-224zM1408 1472v-1664q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45v1664q0 26 19 45t45 19h1280q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf0f8;" horiz-adv-x="1408" d="M384 224v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM384 480v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z M640 480v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM384 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z M1152 224v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM896 480v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z M640 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM1152 480v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z M896 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM1152 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z M896 -128h384v1152h-256v-32q0 -40 -28 -68t-68 -28h-448q-40 0 -68 28t-28 68v32h-256v-1152h384v224q0 13 9.5 22.5t22.5 9.5h320q13 0 22.5 -9.5t9.5 -22.5v-224zM896 1056v320q0 13 -9.5 22.5t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-96h-128v96q0 13 -9.5 22.5 t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-320q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5v96h128v-96q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5zM1408 1088v-1280q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45v1280q0 26 19 45t45 19h320 v288q0 40 28 68t68 28h448q40 0 68 -28t28 -68v-288h320q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf0f9;" horiz-adv-x="1920" d="M640 128q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM256 640h384v256h-158q-14 -2 -22 -9l-195 -195q-7 -12 -9 -22v-30zM1536 128q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5 t90.5 37.5t37.5 90.5zM1664 800v192q0 14 -9 23t-23 9h-224v224q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-224h-224q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h224v-224q0 -14 9 -23t23 -9h192q14 0 23 9t9 23v224h224q14 0 23 9t9 23zM1920 1344v-1152 q0 -26 -19 -45t-45 -19h-192q0 -106 -75 -181t-181 -75t-181 75t-75 181h-384q0 -106 -75 -181t-181 -75t-181 75t-75 181h-128q-26 0 -45 19t-19 45t19 45t45 19v416q0 26 13 58t32 51l198 198q19 19 51 32t58 13h160v320q0 26 19 45t45 19h1152q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf0fa;" horiz-adv-x="1792" d="M1280 416v192q0 14 -9 23t-23 9h-224v224q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-224h-224q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h224v-224q0 -14 9 -23t23 -9h192q14 0 23 9t9 23v224h224q14 0 23 9t9 23zM640 1152h512v128h-512v-128zM256 1152v-1280h-32 q-92 0 -158 66t-66 158v832q0 92 66 158t158 66h32zM1440 1152v-1280h-1088v1280h160v160q0 40 28 68t68 28h576q40 0 68 -28t28 -68v-160h160zM1792 928v-832q0 -92 -66 -158t-158 -66h-32v1280h32q92 0 158 -66t66 -158z" />
+<glyph unicode="&#xf0fb;" horiz-adv-x="1920" d="M1920 576q-1 -32 -288 -96l-352 -32l-224 -64h-64l-293 -352h69q26 0 45 -4.5t19 -11.5t-19 -11.5t-45 -4.5h-96h-160h-64v32h64v416h-160l-192 -224h-96l-32 32v192h32v32h128v8l-192 24v128l192 24v8h-128v32h-32v192l32 32h96l192 -224h160v416h-64v32h64h160h96 q26 0 45 -4.5t19 -11.5t-19 -11.5t-45 -4.5h-69l293 -352h64l224 -64l352 -32q261 -58 287 -93z" />
+<glyph unicode="&#xf0fc;" horiz-adv-x="1664" d="M640 640v384h-256v-256q0 -53 37.5 -90.5t90.5 -37.5h128zM1664 192v-192h-1152v192l128 192h-128q-159 0 -271.5 112.5t-112.5 271.5v320l-64 64l32 128h480l32 128h960l32 -192l-64 -32v-800z" />
+<glyph unicode="&#xf0fd;" d="M1280 192v896q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-320h-512v320q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-896q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v320h512v-320q0 -26 19 -45t45 -19h128q26 0 45 19t19 45zM1536 1120v-960 q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf0fe;" d="M1280 576v128q0 26 -19 45t-45 19h-320v320q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-320h-320q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h320v-320q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v320h320q26 0 45 19t19 45zM1536 1120v-960 q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf100;" horiz-adv-x="1024" d="M627 160q0 -13 -10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23t10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-393 -393l393 -393q10 -10 10 -23zM1011 160q0 -13 -10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23 t10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-393 -393l393 -393q10 -10 10 -23z" />
+<glyph unicode="&#xf101;" horiz-adv-x="1024" d="M595 576q0 -13 -10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23t10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23zM979 576q0 -13 -10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23t10 23 l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23z" />
+<glyph unicode="&#xf102;" horiz-adv-x="1152" d="M1075 224q0 -13 -10 -23l-50 -50q-10 -10 -23 -10t-23 10l-393 393l-393 -393q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23t10 23l466 466q10 10 23 10t23 -10l466 -466q10 -10 10 -23zM1075 608q0 -13 -10 -23l-50 -50q-10 -10 -23 -10t-23 10l-393 393l-393 -393 q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23t10 23l466 466q10 10 23 10t23 -10l466 -466q10 -10 10 -23z" />
+<glyph unicode="&#xf103;" horiz-adv-x="1152" d="M1075 672q0 -13 -10 -23l-466 -466q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l393 -393l393 393q10 10 23 10t23 -10l50 -50q10 -10 10 -23zM1075 1056q0 -13 -10 -23l-466 -466q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23 t10 23l50 50q10 10 23 10t23 -10l393 -393l393 393q10 10 23 10t23 -10l50 -50q10 -10 10 -23z" />
+<glyph unicode="&#xf104;" horiz-adv-x="640" d="M627 992q0 -13 -10 -23l-393 -393l393 -393q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23t10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23z" />
+<glyph unicode="&#xf105;" horiz-adv-x="640" d="M595 576q0 -13 -10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23t10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23z" />
+<glyph unicode="&#xf106;" horiz-adv-x="1152" d="M1075 352q0 -13 -10 -23l-50 -50q-10 -10 -23 -10t-23 10l-393 393l-393 -393q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23t10 23l466 466q10 10 23 10t23 -10l466 -466q10 -10 10 -23z" />
+<glyph unicode="&#xf107;" horiz-adv-x="1152" d="M1075 800q0 -13 -10 -23l-466 -466q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l393 -393l393 393q10 10 23 10t23 -10l50 -50q10 -10 10 -23z" />
+<glyph unicode="&#xf108;" horiz-adv-x="1920" d="M1792 544v832q0 13 -9.5 22.5t-22.5 9.5h-1600q-13 0 -22.5 -9.5t-9.5 -22.5v-832q0 -13 9.5 -22.5t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5zM1920 1376v-1088q0 -66 -47 -113t-113 -47h-544q0 -37 16 -77.5t32 -71t16 -43.5q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19 t-19 45q0 14 16 44t32 70t16 78h-544q-66 0 -113 47t-47 113v1088q0 66 47 113t113 47h1600q66 0 113 -47t47 -113z" />
+<glyph unicode="&#xf109;" horiz-adv-x="1920" d="M416 256q-66 0 -113 47t-47 113v704q0 66 47 113t113 47h1088q66 0 113 -47t47 -113v-704q0 -66 -47 -113t-113 -47h-1088zM384 1120v-704q0 -13 9.5 -22.5t22.5 -9.5h1088q13 0 22.5 9.5t9.5 22.5v704q0 13 -9.5 22.5t-22.5 9.5h-1088q-13 0 -22.5 -9.5t-9.5 -22.5z M1760 192h160v-96q0 -40 -47 -68t-113 -28h-1600q-66 0 -113 28t-47 68v96h160h1600zM1040 96q16 0 16 16t-16 16h-160q-16 0 -16 -16t16 -16h160z" />
+<glyph unicode="&#xf10a;" horiz-adv-x="1152" d="M640 128q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1024 288v960q0 13 -9.5 22.5t-22.5 9.5h-832q-13 0 -22.5 -9.5t-9.5 -22.5v-960q0 -13 9.5 -22.5t22.5 -9.5h832q13 0 22.5 9.5t9.5 22.5zM1152 1248v-1088q0 -66 -47 -113t-113 -47h-832 q-66 0 -113 47t-47 113v1088q0 66 47 113t113 47h832q66 0 113 -47t47 -113z" />
+<glyph unicode="&#xf10b;" horiz-adv-x="768" d="M464 128q0 33 -23.5 56.5t-56.5 23.5t-56.5 -23.5t-23.5 -56.5t23.5 -56.5t56.5 -23.5t56.5 23.5t23.5 56.5zM672 288v704q0 13 -9.5 22.5t-22.5 9.5h-512q-13 0 -22.5 -9.5t-9.5 -22.5v-704q0 -13 9.5 -22.5t22.5 -9.5h512q13 0 22.5 9.5t9.5 22.5zM480 1136 q0 16 -16 16h-160q-16 0 -16 -16t16 -16h160q16 0 16 16zM768 1152v-1024q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90v1024q0 52 38 90t90 38h512q52 0 90 -38t38 -90z" />
+<glyph unicode="&#xf10c;" d="M768 1184q-148 0 -273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103 t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf10d;" horiz-adv-x="1664" d="M768 576v-384q0 -80 -56 -136t-136 -56h-384q-80 0 -136 56t-56 136v704q0 104 40.5 198.5t109.5 163.5t163.5 109.5t198.5 40.5h64q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-64q-106 0 -181 -75t-75 -181v-32q0 -40 28 -68t68 -28h224q80 0 136 -56t56 -136z M1664 576v-384q0 -80 -56 -136t-136 -56h-384q-80 0 -136 56t-56 136v704q0 104 40.5 198.5t109.5 163.5t163.5 109.5t198.5 40.5h64q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-64q-106 0 -181 -75t-75 -181v-32q0 -40 28 -68t68 -28h224q80 0 136 -56t56 -136z" />
+<glyph unicode="&#xf10e;" horiz-adv-x="1664" d="M768 1216v-704q0 -104 -40.5 -198.5t-109.5 -163.5t-163.5 -109.5t-198.5 -40.5h-64q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h64q106 0 181 75t75 181v32q0 40 -28 68t-68 28h-224q-80 0 -136 56t-56 136v384q0 80 56 136t136 56h384q80 0 136 -56t56 -136zM1664 1216 v-704q0 -104 -40.5 -198.5t-109.5 -163.5t-163.5 -109.5t-198.5 -40.5h-64q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h64q106 0 181 75t75 181v32q0 40 -28 68t-68 28h-224q-80 0 -136 56t-56 136v384q0 80 56 136t136 56h384q80 0 136 -56t56 -136z" />
+<glyph unicode="&#xf110;" horiz-adv-x="1568" d="M496 192q0 -60 -42.5 -102t-101.5 -42q-60 0 -102 42t-42 102t42 102t102 42q59 0 101.5 -42t42.5 -102zM928 0q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM320 640q0 -66 -47 -113t-113 -47t-113 47t-47 113 t47 113t113 47t113 -47t47 -113zM1360 192q0 -46 -33 -79t-79 -33t-79 33t-33 79t33 79t79 33t79 -33t33 -79zM528 1088q0 -73 -51.5 -124.5t-124.5 -51.5t-124.5 51.5t-51.5 124.5t51.5 124.5t124.5 51.5t124.5 -51.5t51.5 -124.5zM992 1280q0 -80 -56 -136t-136 -56 t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM1536 640q0 -40 -28 -68t-68 -28t-68 28t-28 68t28 68t68 28t68 -28t28 -68zM1328 1088q0 -33 -23.5 -56.5t-56.5 -23.5t-56.5 23.5t-23.5 56.5t23.5 56.5t56.5 23.5t56.5 -23.5t23.5 -56.5z" />
+<glyph unicode="&#xf111;" d="M1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf112;" horiz-adv-x="1792" d="M1792 416q0 -166 -127 -451q-3 -7 -10.5 -24t-13.5 -30t-13 -22q-12 -17 -28 -17q-15 0 -23.5 10t-8.5 25q0 9 2.5 26.5t2.5 23.5q5 68 5 123q0 101 -17.5 181t-48.5 138.5t-80 101t-105.5 69.5t-133 42.5t-154 21.5t-175.5 6h-224v-256q0 -26 -19 -45t-45 -19t-45 19 l-512 512q-19 19 -19 45t19 45l512 512q19 19 45 19t45 -19t19 -45v-256h224q713 0 875 -403q53 -134 53 -333z" />
+<glyph unicode="&#xf113;" horiz-adv-x="1664" d="M640 320q0 -40 -12.5 -82t-43 -76t-72.5 -34t-72.5 34t-43 76t-12.5 82t12.5 82t43 76t72.5 34t72.5 -34t43 -76t12.5 -82zM1280 320q0 -40 -12.5 -82t-43 -76t-72.5 -34t-72.5 34t-43 76t-12.5 82t12.5 82t43 76t72.5 34t72.5 -34t43 -76t12.5 -82zM1440 320 q0 120 -69 204t-187 84q-41 0 -195 -21q-71 -11 -157 -11t-157 11q-152 21 -195 21q-118 0 -187 -84t-69 -204q0 -88 32 -153.5t81 -103t122 -60t140 -29.5t149 -7h168q82 0 149 7t140 29.5t122 60t81 103t32 153.5zM1664 496q0 -207 -61 -331q-38 -77 -105.5 -133t-141 -86 t-170 -47.5t-171.5 -22t-167 -4.5q-78 0 -142 3t-147.5 12.5t-152.5 30t-137 51.5t-121 81t-86 115q-62 123 -62 331q0 237 136 396q-27 82 -27 170q0 116 51 218q108 0 190 -39.5t189 -123.5q147 35 309 35q148 0 280 -32q105 82 187 121t189 39q51 -102 51 -218 q0 -87 -27 -168q136 -160 136 -398z" />
+<glyph unicode="&#xf114;" horiz-adv-x="1664" d="M1536 224v704q0 40 -28 68t-68 28h-704q-40 0 -68 28t-28 68v64q0 40 -28 68t-68 28h-320q-40 0 -68 -28t-28 -68v-960q0 -40 28 -68t68 -28h1216q40 0 68 28t28 68zM1664 928v-704q0 -92 -66 -158t-158 -66h-1216q-92 0 -158 66t-66 158v960q0 92 66 158t158 66h320 q92 0 158 -66t66 -158v-32h672q92 0 158 -66t66 -158z" />
+<glyph unicode="&#xf115;" horiz-adv-x="1920" d="M1781 605q0 35 -53 35h-1088q-40 0 -85.5 -21.5t-71.5 -52.5l-294 -363q-18 -24 -18 -40q0 -35 53 -35h1088q40 0 86 22t71 53l294 363q18 22 18 39zM640 768h768v160q0 40 -28 68t-68 28h-576q-40 0 -68 28t-28 68v64q0 40 -28 68t-68 28h-320q-40 0 -68 -28t-28 -68 v-853l256 315q44 53 116 87.5t140 34.5zM1909 605q0 -62 -46 -120l-295 -363q-43 -53 -116 -87.5t-140 -34.5h-1088q-92 0 -158 66t-66 158v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h544q92 0 158 -66t66 -158v-160h192q54 0 99 -24.5t67 -70.5q15 -32 15 -68z " />
+<glyph unicode="&#xf116;" horiz-adv-x="1792" />
+<glyph unicode="&#xf117;" horiz-adv-x="1792" />
+<glyph unicode="&#xf118;" d="M1134 461q-37 -121 -138 -195t-228 -74t-228 74t-138 195q-8 25 4 48.5t38 31.5q25 8 48.5 -4t31.5 -38q25 -80 92.5 -129.5t151.5 -49.5t151.5 49.5t92.5 129.5q8 26 32 38t49 4t37 -31.5t4 -48.5zM640 896q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5 t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1152 896q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1408 640q0 130 -51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5 t51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf119;" d="M1134 307q8 -25 -4 -48.5t-37 -31.5t-49 4t-32 38q-25 80 -92.5 129.5t-151.5 49.5t-151.5 -49.5t-92.5 -129.5q-8 -26 -31.5 -38t-48.5 -4q-26 8 -38 31.5t-4 48.5q37 121 138 195t228 74t228 -74t138 -195zM640 896q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5 t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1152 896q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1408 640q0 130 -51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204 t-51 -248.5t51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf11a;" d="M1152 448q0 -26 -19 -45t-45 -19h-640q-26 0 -45 19t-19 45t19 45t45 19h640q26 0 45 -19t19 -45zM640 896q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1152 896q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5 t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1408 640q0 130 -51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5t51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5zM1536 640 q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf11b;" horiz-adv-x="1920" d="M832 448v128q0 14 -9 23t-23 9h-192v192q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-192h-192q-14 0 -23 -9t-9 -23v-128q0 -14 9 -23t23 -9h192v-192q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v192h192q14 0 23 9t9 23zM1408 384q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5 t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1664 640q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1920 512q0 -212 -150 -362t-362 -150q-192 0 -338 128h-220q-146 -128 -338 -128q-212 0 -362 150 t-150 362t150 362t362 150h896q212 0 362 -150t150 -362z" />
+<glyph unicode="&#xf11c;" horiz-adv-x="1920" d="M384 368v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM512 624v-96q0 -16 -16 -16h-224q-16 0 -16 16v96q0 16 16 16h224q16 0 16 -16zM384 880v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM1408 368v-96q0 -16 -16 -16 h-864q-16 0 -16 16v96q0 16 16 16h864q16 0 16 -16zM768 624v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM640 880v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM1024 624v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16 h96q16 0 16 -16zM896 880v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM1280 624v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM1664 368v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM1152 880v-96 q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM1408 880v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM1664 880v-352q0 -16 -16 -16h-224q-16 0 -16 16v96q0 16 16 16h112v240q0 16 16 16h96q16 0 16 -16zM1792 128v896h-1664v-896 h1664zM1920 1024v-896q0 -53 -37.5 -90.5t-90.5 -37.5h-1664q-53 0 -90.5 37.5t-37.5 90.5v896q0 53 37.5 90.5t90.5 37.5h1664q53 0 90.5 -37.5t37.5 -90.5z" />
+<glyph unicode="&#xf11d;" horiz-adv-x="1792" d="M1664 491v616q-169 -91 -306 -91q-82 0 -145 32q-100 49 -184 76.5t-178 27.5q-173 0 -403 -127v-599q245 113 433 113q55 0 103.5 -7.5t98 -26t77 -31t82.5 -39.5l28 -14q44 -22 101 -22q120 0 293 92zM320 1280q0 -35 -17.5 -64t-46.5 -46v-1266q0 -14 -9 -23t-23 -9 h-64q-14 0 -23 9t-9 23v1266q-29 17 -46.5 46t-17.5 64q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1792 1216v-763q0 -39 -35 -57q-10 -5 -17 -9q-218 -116 -369 -116q-88 0 -158 35l-28 14q-64 33 -99 48t-91 29t-114 14q-102 0 -235.5 -44t-228.5 -102 q-15 -9 -33 -9q-16 0 -32 8q-32 19 -32 56v742q0 35 31 55q35 21 78.5 42.5t114 52t152.5 49.5t155 19q112 0 209 -31t209 -86q38 -19 89 -19q122 0 310 112q22 12 31 17q31 16 62 -2q31 -20 31 -55z" />
+<glyph unicode="&#xf11e;" horiz-adv-x="1792" d="M832 536v192q-181 -16 -384 -117v-185q205 96 384 110zM832 954v197q-172 -8 -384 -126v-189q215 111 384 118zM1664 491v184q-235 -116 -384 -71v224q-20 6 -39 15q-5 3 -33 17t-34.5 17t-31.5 15t-34.5 15.5t-32.5 13t-36 12.5t-35 8.5t-39.5 7.5t-39.5 4t-44 2 q-23 0 -49 -3v-222h19q102 0 192.5 -29t197.5 -82q19 -9 39 -15v-188q42 -17 91 -17q120 0 293 92zM1664 918v189q-169 -91 -306 -91q-45 0 -78 8v-196q148 -42 384 90zM320 1280q0 -35 -17.5 -64t-46.5 -46v-1266q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v1266 q-29 17 -46.5 46t-17.5 64q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1792 1216v-763q0 -39 -35 -57q-10 -5 -17 -9q-218 -116 -369 -116q-88 0 -158 35l-28 14q-64 33 -99 48t-91 29t-114 14q-102 0 -235.5 -44t-228.5 -102q-15 -9 -33 -9q-16 0 -32 8 q-32 19 -32 56v742q0 35 31 55q35 21 78.5 42.5t114 52t152.5 49.5t155 19q112 0 209 -31t209 -86q38 -19 89 -19q122 0 310 112q22 12 31 17q31 16 62 -2q31 -20 31 -55z" />
+<glyph unicode="&#xf120;" horiz-adv-x="1664" d="M585 553l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23t10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23zM1664 96v-64q0 -14 -9 -23t-23 -9h-960q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h960q14 0 23 -9 t9 -23z" />
+<glyph unicode="&#xf121;" horiz-adv-x="1920" d="M617 137l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23t10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-393 -393l393 -393q10 -10 10 -23t-10 -23zM1208 1204l-373 -1291q-4 -13 -15.5 -19.5t-23.5 -2.5l-62 17q-13 4 -19.5 15.5t-2.5 24.5 l373 1291q4 13 15.5 19.5t23.5 2.5l62 -17q13 -4 19.5 -15.5t2.5 -24.5zM1865 553l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23t10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23z" />
+<glyph unicode="&#xf122;" horiz-adv-x="1792" d="M640 454v-70q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-512 512q-19 19 -19 45t19 45l512 512q29 31 70 14q39 -17 39 -59v-69l-397 -398q-19 -19 -19 -45t19 -45zM1792 416q0 -58 -17 -133.5t-38.5 -138t-48 -125t-40.5 -90.5l-20 -40q-8 -17 -28 -17q-6 0 -9 1 q-25 8 -23 34q43 400 -106 565q-64 71 -170.5 110.5t-267.5 52.5v-251q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-512 512q-19 19 -19 45t19 45l512 512q29 31 70 14q39 -17 39 -59v-262q411 -28 599 -221q169 -173 169 -509z" />
+<glyph unicode="&#xf123;" horiz-adv-x="1664" d="M1186 579l257 250l-356 52l-66 10l-30 60l-159 322v-963l59 -31l318 -168l-60 355l-12 66zM1638 841l-363 -354l86 -500q5 -33 -6 -51.5t-34 -18.5q-17 0 -40 12l-449 236l-449 -236q-23 -12 -40 -12q-23 0 -34 18.5t-6 51.5l86 500l-364 354q-32 32 -23 59.5t54 34.5 l502 73l225 455q20 41 49 41q28 0 49 -41l225 -455l502 -73q45 -7 54 -34.5t-24 -59.5z" />
+<glyph unicode="&#xf124;" horiz-adv-x="1408" d="M1401 1187l-640 -1280q-17 -35 -57 -35q-5 0 -15 2q-22 5 -35.5 22.5t-13.5 39.5v576h-576q-22 0 -39.5 13.5t-22.5 35.5t4 42t29 30l1280 640q13 7 29 7q27 0 45 -19q15 -14 18.5 -34.5t-6.5 -39.5z" />
+<glyph unicode="&#xf125;" horiz-adv-x="1664" d="M557 256h595v595zM512 301l595 595h-595v-595zM1664 224v-192q0 -14 -9 -23t-23 -9h-224v-224q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v224h-864q-14 0 -23 9t-9 23v864h-224q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h224v224q0 14 9 23t23 9h192q14 0 23 -9t9 -23 v-224h851l246 247q10 9 23 9t23 -9q9 -10 9 -23t-9 -23l-247 -246v-851h224q14 0 23 -9t9 -23z" />
+<glyph unicode="&#xf126;" horiz-adv-x="1024" d="M288 64q0 40 -28 68t-68 28t-68 -28t-28 -68t28 -68t68 -28t68 28t28 68zM288 1216q0 40 -28 68t-68 28t-68 -28t-28 -68t28 -68t68 -28t68 28t28 68zM928 1088q0 40 -28 68t-68 28t-68 -28t-28 -68t28 -68t68 -28t68 28t28 68zM1024 1088q0 -52 -26 -96.5t-70 -69.5 q-2 -287 -226 -414q-68 -38 -203 -81q-128 -40 -169.5 -71t-41.5 -100v-26q44 -25 70 -69.5t26 -96.5q0 -80 -56 -136t-136 -56t-136 56t-56 136q0 52 26 96.5t70 69.5v820q-44 25 -70 69.5t-26 96.5q0 80 56 136t136 56t136 -56t56 -136q0 -52 -26 -96.5t-70 -69.5v-497 q54 26 154 57q55 17 87.5 29.5t70.5 31t59 39.5t40.5 51t28 69.5t8.5 91.5q-44 25 -70 69.5t-26 96.5q0 80 56 136t136 56t136 -56t56 -136z" />
+<glyph unicode="&#xf127;" horiz-adv-x="1664" d="M439 265l-256 -256q-10 -9 -23 -9q-12 0 -23 9q-9 10 -9 23t9 23l256 256q10 9 23 9t23 -9q9 -10 9 -23t-9 -23zM608 224v-320q0 -14 -9 -23t-23 -9t-23 9t-9 23v320q0 14 9 23t23 9t23 -9t9 -23zM384 448q0 -14 -9 -23t-23 -9h-320q-14 0 -23 9t-9 23t9 23t23 9h320 q14 0 23 -9t9 -23zM1648 320q0 -120 -85 -203l-147 -146q-83 -83 -203 -83q-121 0 -204 85l-334 335q-21 21 -42 56l239 18l273 -274q27 -27 68 -27.5t68 26.5l147 146q28 28 28 67q0 40 -28 68l-274 275l18 239q35 -21 56 -42l336 -336q84 -86 84 -204zM1031 1044l-239 -18 l-273 274q-28 28 -68 28q-39 0 -68 -27l-147 -146q-28 -28 -28 -67q0 -40 28 -68l274 -274l-18 -240q-35 21 -56 42l-336 336q-84 86 -84 204q0 120 85 203l147 146q83 83 203 83q121 0 204 -85l334 -335q21 -21 42 -56zM1664 960q0 -14 -9 -23t-23 -9h-320q-14 0 -23 9 t-9 23t9 23t23 9h320q14 0 23 -9t9 -23zM1120 1504v-320q0 -14 -9 -23t-23 -9t-23 9t-9 23v320q0 14 9 23t23 9t23 -9t9 -23zM1527 1353l-256 -256q-11 -9 -23 -9t-23 9q-9 10 -9 23t9 23l256 256q10 9 23 9t23 -9q9 -10 9 -23t-9 -23z" />
+<glyph unicode="&#xf128;" horiz-adv-x="1024" d="M704 280v-240q0 -16 -12 -28t-28 -12h-240q-16 0 -28 12t-12 28v240q0 16 12 28t28 12h240q16 0 28 -12t12 -28zM1020 880q0 -54 -15.5 -101t-35 -76.5t-55 -59.5t-57.5 -43.5t-61 -35.5q-41 -23 -68.5 -65t-27.5 -67q0 -17 -12 -32.5t-28 -15.5h-240q-15 0 -25.5 18.5 t-10.5 37.5v45q0 83 65 156.5t143 108.5q59 27 84 56t25 76q0 42 -46.5 74t-107.5 32q-65 0 -108 -29q-35 -25 -107 -115q-13 -16 -31 -16q-12 0 -25 8l-164 125q-13 10 -15.5 25t5.5 28q160 266 464 266q80 0 161 -31t146 -83t106 -127.5t41 -158.5z" />
+<glyph unicode="&#xf129;" horiz-adv-x="640" d="M640 192v-128q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h64v384h-64q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-576h64q26 0 45 -19t19 -45zM512 1344v-192q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45v192 q0 26 19 45t45 19h256q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf12a;" horiz-adv-x="640" d="M512 288v-224q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45v224q0 26 19 45t45 19h256q26 0 45 -19t19 -45zM542 1344l-28 -768q-1 -26 -20.5 -45t-45.5 -19h-256q-26 0 -45.5 19t-20.5 45l-28 768q-1 26 17.5 45t44.5 19h320q26 0 44.5 -19t17.5 -45z" />
+<glyph unicode="&#xf12b;" d="M897 167v-167h-248l-159 252l-24 42q-8 9 -11 21h-3l-9 -21q-10 -20 -25 -44l-155 -250h-258v167h128l197 291l-185 272h-137v168h276l139 -228q2 -4 23 -42q8 -9 11 -21h3q3 9 11 21l25 42l140 228h257v-168h-125l-184 -267l204 -296h109zM1534 846v-206h-514l-3 27 q-4 28 -4 46q0 64 26 117t65 86.5t84 65t84 54.5t65 54t26 64q0 38 -29.5 62.5t-70.5 24.5q-51 0 -97 -39q-14 -11 -36 -38l-105 92q26 37 63 66q83 65 188 65q110 0 178 -59.5t68 -158.5q0 -56 -24.5 -103t-62 -76.5t-81.5 -58.5t-82 -50.5t-65.5 -51.5t-30.5 -63h232v80 h126z" />
+<glyph unicode="&#xf12c;" d="M897 167v-167h-248l-159 252l-24 42q-8 9 -11 21h-3l-9 -21q-10 -20 -25 -44l-155 -250h-258v167h128l197 291l-185 272h-137v168h276l139 -228q2 -4 23 -42q8 -9 11 -21h3q3 9 11 21l25 42l140 228h257v-168h-125l-184 -267l204 -296h109zM1536 -50v-206h-514l-4 27 q-3 45 -3 46q0 64 26 117t65 86.5t84 65t84 54.5t65 54t26 64q0 38 -29.5 62.5t-70.5 24.5q-51 0 -97 -39q-14 -11 -36 -38l-105 92q26 37 63 66q80 65 188 65q110 0 178 -59.5t68 -158.5q0 -66 -34.5 -118.5t-84 -86t-99.5 -62.5t-87 -63t-41 -73h232v80h126z" />
+<glyph unicode="&#xf12d;" horiz-adv-x="1920" d="M896 128l336 384h-768l-336 -384h768zM1909 1205q15 -34 9.5 -71.5t-30.5 -65.5l-896 -1024q-38 -44 -96 -44h-768q-38 0 -69.5 20.5t-47.5 54.5q-15 34 -9.5 71.5t30.5 65.5l896 1024q38 44 96 44h768q38 0 69.5 -20.5t47.5 -54.5z" />
+<glyph unicode="&#xf12e;" horiz-adv-x="1664" d="M1664 438q0 -81 -44.5 -135t-123.5 -54q-41 0 -77.5 17.5t-59 38t-56.5 38t-71 17.5q-110 0 -110 -124q0 -39 16 -115t15 -115v-5q-22 0 -33 -1q-34 -3 -97.5 -11.5t-115.5 -13.5t-98 -5q-61 0 -103 26.5t-42 83.5q0 37 17.5 71t38 56.5t38 59t17.5 77.5q0 79 -54 123.5 t-135 44.5q-84 0 -143 -45.5t-59 -127.5q0 -43 15 -83t33.5 -64.5t33.5 -53t15 -50.5q0 -45 -46 -89q-37 -35 -117 -35q-95 0 -245 24q-9 2 -27.5 4t-27.5 4l-13 2q-1 0 -3 1q-2 0 -2 1v1024q2 -1 17.5 -3.5t34 -5t21.5 -3.5q150 -24 245 -24q80 0 117 35q46 44 46 89 q0 22 -15 50.5t-33.5 53t-33.5 64.5t-15 83q0 82 59 127.5t144 45.5q80 0 134 -44.5t54 -123.5q0 -41 -17.5 -77.5t-38 -59t-38 -56.5t-17.5 -71q0 -57 42 -83.5t103 -26.5q64 0 180 15t163 17v-2q-1 -2 -3.5 -17.5t-5 -34t-3.5 -21.5q-24 -150 -24 -245q0 -80 35 -117 q44 -46 89 -46q22 0 50.5 15t53 33.5t64.5 33.5t83 15q82 0 127.5 -59t45.5 -143z" />
+<glyph unicode="&#xf130;" horiz-adv-x="1152" d="M1152 832v-128q0 -221 -147.5 -384.5t-364.5 -187.5v-132h256q26 0 45 -19t19 -45t-19 -45t-45 -19h-640q-26 0 -45 19t-19 45t19 45t45 19h256v132q-217 24 -364.5 187.5t-147.5 384.5v128q0 26 19 45t45 19t45 -19t19 -45v-128q0 -185 131.5 -316.5t316.5 -131.5 t316.5 131.5t131.5 316.5v128q0 26 19 45t45 19t45 -19t19 -45zM896 1216v-512q0 -132 -94 -226t-226 -94t-226 94t-94 226v512q0 132 94 226t226 94t226 -94t94 -226z" />
+<glyph unicode="&#xf131;" horiz-adv-x="1408" d="M271 591l-101 -101q-42 103 -42 214v128q0 26 19 45t45 19t45 -19t19 -45v-128q0 -53 15 -113zM1385 1193l-361 -361v-128q0 -132 -94 -226t-226 -94q-55 0 -109 19l-96 -96q97 -51 205 -51q185 0 316.5 131.5t131.5 316.5v128q0 26 19 45t45 19t45 -19t19 -45v-128 q0 -221 -147.5 -384.5t-364.5 -187.5v-132h256q26 0 45 -19t19 -45t-19 -45t-45 -19h-640q-26 0 -45 19t-19 45t19 45t45 19h256v132q-125 13 -235 81l-254 -254q-10 -10 -23 -10t-23 10l-82 82q-10 10 -10 23t10 23l1234 1234q10 10 23 10t23 -10l82 -82q10 -10 10 -23 t-10 -23zM1005 1325l-621 -621v512q0 132 94 226t226 94q102 0 184.5 -59t116.5 -152z" />
+<glyph unicode="&#xf132;" horiz-adv-x="1280" d="M1088 576v640h-448v-1137q119 63 213 137q235 184 235 360zM1280 1344v-768q0 -86 -33.5 -170.5t-83 -150t-118 -127.5t-126.5 -103t-121 -77.5t-89.5 -49.5t-42.5 -20q-12 -6 -26 -6t-26 6q-16 7 -42.5 20t-89.5 49.5t-121 77.5t-126.5 103t-118 127.5t-83 150 t-33.5 170.5v768q0 26 19 45t45 19h1152q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf133;" horiz-adv-x="1664" d="M128 -128h1408v1024h-1408v-1024zM512 1088v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-288q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM1280 1088v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-288q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM1664 1152v-1280 q0 -52 -38 -90t-90 -38h-1408q-52 0 -90 38t-38 90v1280q0 52 38 90t90 38h128v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h384v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h128q52 0 90 -38t38 -90z" />
+<glyph unicode="&#xf134;" horiz-adv-x="1408" d="M512 1344q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1408 1376v-320q0 -16 -12 -25q-8 -7 -20 -7q-4 0 -7 1l-448 96q-11 2 -18 11t-7 20h-256v-102q111 -23 183.5 -111t72.5 -203v-800q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45v800 q0 106 62.5 190.5t161.5 114.5v111h-32q-59 0 -115 -23.5t-91.5 -53t-66 -66.5t-40.5 -53.5t-14 -24.5q-17 -35 -57 -35q-16 0 -29 7q-23 12 -31.5 37t3.5 49q5 10 14.5 26t37.5 53.5t60.5 70t85 67t108.5 52.5q-25 42 -25 86q0 66 47 113t113 47t113 -47t47 -113 q0 -33 -14 -64h302q0 11 7 20t18 11l448 96q3 1 7 1q12 0 20 -7q12 -9 12 -25z" />
+<glyph unicode="&#xf135;" horiz-adv-x="1664" d="M1440 1088q0 40 -28 68t-68 28t-68 -28t-28 -68t28 -68t68 -28t68 28t28 68zM1664 1376q0 -249 -75.5 -430.5t-253.5 -360.5q-81 -80 -195 -176l-20 -379q-2 -16 -16 -26l-384 -224q-7 -4 -16 -4q-12 0 -23 9l-64 64q-13 14 -8 32l85 276l-281 281l-276 -85q-3 -1 -9 -1 q-14 0 -23 9l-64 64q-17 19 -5 39l224 384q10 14 26 16l379 20q96 114 176 195q188 187 358 258t431 71q14 0 24 -9.5t10 -22.5z" />
+<glyph unicode="&#xf136;" horiz-adv-x="1792" d="M1745 763l-164 -763h-334l178 832q13 56 -15 88q-27 33 -83 33h-169l-204 -953h-334l204 953h-286l-204 -953h-334l204 953l-153 327h1276q101 0 189.5 -40.5t147.5 -113.5q60 -73 81 -168.5t0 -194.5z" />
+<glyph unicode="&#xf137;" d="M909 141l102 102q19 19 19 45t-19 45l-307 307l307 307q19 19 19 45t-19 45l-102 102q-19 19 -45 19t-45 -19l-454 -454q-19 -19 -19 -45t19 -45l454 -454q19 -19 45 -19t45 19zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5 t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf138;" d="M717 141l454 454q19 19 19 45t-19 45l-454 454q-19 19 -45 19t-45 -19l-102 -102q-19 -19 -19 -45t19 -45l307 -307l-307 -307q-19 -19 -19 -45t19 -45l102 -102q19 -19 45 -19t45 19zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5 t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf139;" d="M1165 397l102 102q19 19 19 45t-19 45l-454 454q-19 19 -45 19t-45 -19l-454 -454q-19 -19 -19 -45t19 -45l102 -102q19 -19 45 -19t45 19l307 307l307 -307q19 -19 45 -19t45 19zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5 t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf13a;" d="M813 237l454 454q19 19 19 45t-19 45l-102 102q-19 19 -45 19t-45 -19l-307 -307l-307 307q-19 19 -45 19t-45 -19l-102 -102q-19 -19 -19 -45t19 -45l454 -454q19 -19 45 -19t45 19zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5 t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf13b;" horiz-adv-x="1408" d="M1130 939l16 175h-884l47 -534h612l-22 -228l-197 -53l-196 53l-13 140h-175l22 -278l362 -100h4v1l359 99l50 544h-644l-15 181h674zM0 1408h1408l-128 -1438l-578 -162l-574 162z" />
+<glyph unicode="&#xf13c;" horiz-adv-x="1792" d="M275 1408h1505l-266 -1333l-804 -267l-698 267l71 356h297l-29 -147l422 -161l486 161l68 339h-1208l58 297h1209l38 191h-1208z" />
+<glyph unicode="&#xf13d;" horiz-adv-x="1792" d="M960 1280q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1792 352v-352q0 -22 -20 -30q-8 -2 -12 -2q-13 0 -23 9l-93 93q-119 -143 -318.5 -226.5t-429.5 -83.5t-429.5 83.5t-318.5 226.5l-93 -93q-9 -9 -23 -9q-4 0 -12 2q-20 8 -20 30v352 q0 14 9 23t23 9h352q22 0 30 -20q8 -19 -7 -35l-100 -100q67 -91 189.5 -153.5t271.5 -82.5v647h-192q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h192v163q-58 34 -93 92.5t-35 128.5q0 106 75 181t181 75t181 -75t75 -181q0 -70 -35 -128.5t-93 -92.5v-163h192q26 0 45 -19 t19 -45v-128q0 -26 -19 -45t-45 -19h-192v-647q149 20 271.5 82.5t189.5 153.5l-100 100q-15 16 -7 35q8 20 30 20h352q14 0 23 -9t9 -23z" />
+<glyph unicode="&#xf13e;" horiz-adv-x="1152" d="M1056 768q40 0 68 -28t28 -68v-576q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v576q0 40 28 68t68 28h32v320q0 185 131.5 316.5t316.5 131.5t316.5 -131.5t131.5 -316.5q0 -26 -19 -45t-45 -19h-64q-26 0 -45 19t-19 45q0 106 -75 181t-181 75t-181 -75t-75 -181 v-320h736z" />
+<glyph unicode="&#xf140;" d="M1024 640q0 -106 -75 -181t-181 -75t-181 75t-75 181t75 181t181 75t181 -75t75 -181zM1152 640q0 159 -112.5 271.5t-271.5 112.5t-271.5 -112.5t-112.5 -271.5t112.5 -271.5t271.5 -112.5t271.5 112.5t112.5 271.5zM1280 640q0 -212 -150 -362t-362 -150t-362 150 t-150 362t150 362t362 150t362 -150t150 -362zM1408 640q0 130 -51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5t51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5zM1536 640 q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf141;" horiz-adv-x="1408" d="M384 800v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68zM896 800v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68zM1408 800v-192q0 -40 -28 -68t-68 -28h-192 q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68z" />
+<glyph unicode="&#xf142;" horiz-adv-x="384" d="M384 288v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68zM384 800v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68zM384 1312v-192q0 -40 -28 -68t-68 -28h-192 q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68z" />
+<glyph unicode="&#xf143;" d="M512 256q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM863 162q-13 232 -177 396t-396 177q-14 1 -24 -9t-10 -23v-128q0 -13 8.5 -22t21.5 -10q154 -11 264 -121t121 -264q1 -13 10 -21.5t22 -8.5h128q13 0 23 10 t9 24zM1247 161q-5 154 -56 297.5t-139.5 260t-205 205t-260 139.5t-297.5 56q-14 1 -23 -9q-10 -10 -10 -23v-128q0 -13 9 -22t22 -10q204 -7 378 -111.5t278.5 -278.5t111.5 -378q1 -13 10 -22t22 -9h128q13 0 23 10q11 9 9 23zM1536 1120v-960q0 -119 -84.5 -203.5 t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf144;" d="M768 1408q209 0 385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103zM1152 585q32 18 32 55t-32 55l-544 320q-31 19 -64 1q-32 -19 -32 -56v-640q0 -37 32 -56 q16 -8 32 -8q17 0 32 9z" />
+<glyph unicode="&#xf145;" horiz-adv-x="1792" d="M1024 1084l316 -316l-572 -572l-316 316zM813 105l618 618q19 19 19 45t-19 45l-362 362q-18 18 -45 18t-45 -18l-618 -618q-19 -19 -19 -45t19 -45l362 -362q18 -18 45 -18t45 18zM1702 742l-907 -908q-37 -37 -90.5 -37t-90.5 37l-126 126q56 56 56 136t-56 136 t-136 56t-136 -56l-125 126q-37 37 -37 90.5t37 90.5l907 906q37 37 90.5 37t90.5 -37l125 -125q-56 -56 -56 -136t56 -136t136 -56t136 56l126 -125q37 -37 37 -90.5t-37 -90.5z" />
+<glyph unicode="&#xf146;" d="M1280 576v128q0 26 -19 45t-45 19h-896q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h896q26 0 45 19t19 45zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5 t84.5 -203.5z" />
+<glyph unicode="&#xf147;" horiz-adv-x="1408" d="M1152 736v-64q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h832q14 0 23 -9t9 -23zM1280 288v832q0 66 -47 113t-113 47h-832q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113zM1408 1120v-832q0 -119 -84.5 -203.5 t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5v832q0 119 84.5 203.5t203.5 84.5h832q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf148;" horiz-adv-x="1024" d="M1018 933q-18 -37 -58 -37h-192v-864q0 -14 -9 -23t-23 -9h-704q-21 0 -29 18q-8 20 4 35l160 192q9 11 25 11h320v640h-192q-40 0 -58 37q-17 37 9 68l320 384q18 22 49 22t49 -22l320 -384q27 -32 9 -68z" />
+<glyph unicode="&#xf149;" horiz-adv-x="1024" d="M32 1280h704q13 0 22.5 -9.5t9.5 -23.5v-863h192q40 0 58 -37t-9 -69l-320 -384q-18 -22 -49 -22t-49 22l-320 384q-26 31 -9 69q18 37 58 37h192v640h-320q-14 0 -25 11l-160 192q-13 14 -4 34q9 19 29 19z" />
+<glyph unicode="&#xf14a;" d="M685 237l614 614q19 19 19 45t-19 45l-102 102q-19 19 -45 19t-45 -19l-467 -467l-211 211q-19 19 -45 19t-45 -19l-102 -102q-19 -19 -19 -45t19 -45l358 -358q19 -19 45 -19t45 19zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5 t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf14b;" d="M404 428l152 -152l-52 -52h-56v96h-96v56zM818 818q14 -13 -3 -30l-291 -291q-17 -17 -30 -3q-14 13 3 30l291 291q17 17 30 3zM544 128l544 544l-288 288l-544 -544v-288h288zM1152 736l92 92q28 28 28 68t-28 68l-152 152q-28 28 -68 28t-68 -28l-92 -92zM1536 1120 v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf14c;" d="M1280 608v480q0 26 -19 45t-45 19h-480q-42 0 -59 -39q-17 -41 14 -70l144 -144l-534 -534q-19 -19 -19 -45t19 -45l102 -102q19 -19 45 -19t45 19l534 534l144 -144q18 -19 45 -19q12 0 25 5q39 17 39 59zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960 q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf14d;" d="M1005 435l352 352q19 19 19 45t-19 45l-352 352q-30 31 -69 14q-40 -17 -40 -59v-160q-119 0 -216 -19.5t-162.5 -51t-114 -79t-76.5 -95.5t-44.5 -109t-21.5 -111.5t-5 -110.5q0 -181 167 -404q10 -12 25 -12q7 0 13 3q22 9 19 33q-44 354 62 473q46 52 130 75.5 t224 23.5v-160q0 -42 40 -59q12 -5 24 -5q26 0 45 19zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf14e;" d="M640 448l256 128l-256 128v-256zM1024 1039v-542l-512 -256v542zM1312 640q0 148 -73 273t-198 198t-273 73t-273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103 t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf150;" d="M1145 861q18 -35 -5 -66l-320 -448q-19 -27 -52 -27t-52 27l-320 448q-23 31 -5 66q17 35 57 35h640q40 0 57 -35zM1280 160v960q0 13 -9.5 22.5t-22.5 9.5h-960q-13 0 -22.5 -9.5t-9.5 -22.5v-960q0 -13 9.5 -22.5t22.5 -9.5h960q13 0 22.5 9.5t9.5 22.5zM1536 1120 v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf151;" d="M1145 419q-17 -35 -57 -35h-640q-40 0 -57 35q-18 35 5 66l320 448q19 27 52 27t52 -27l320 -448q23 -31 5 -66zM1280 160v960q0 13 -9.5 22.5t-22.5 9.5h-960q-13 0 -22.5 -9.5t-9.5 -22.5v-960q0 -13 9.5 -22.5t22.5 -9.5h960q13 0 22.5 9.5t9.5 22.5zM1536 1120v-960 q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf152;" d="M1088 640q0 -33 -27 -52l-448 -320q-31 -23 -66 -5q-35 17 -35 57v640q0 40 35 57q35 18 66 -5l448 -320q27 -19 27 -52zM1280 160v960q0 14 -9 23t-23 9h-960q-14 0 -23 -9t-9 -23v-960q0 -14 9 -23t23 -9h960q14 0 23 9t9 23zM1536 1120v-960q0 -119 -84.5 -203.5 t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf153;" horiz-adv-x="1024" d="M976 229l35 -159q3 -12 -3 -22.5t-17 -14.5l-5 -1q-4 -2 -10.5 -3.5t-16 -4.5t-21.5 -5.5t-25.5 -5t-30 -5t-33.5 -4.5t-36.5 -3t-38.5 -1q-234 0 -409 130.5t-238 351.5h-95q-13 0 -22.5 9.5t-9.5 22.5v113q0 13 9.5 22.5t22.5 9.5h66q-2 57 1 105h-67q-14 0 -23 9 t-9 23v114q0 14 9 23t23 9h98q67 210 243.5 338t400.5 128q102 0 194 -23q11 -3 20 -15q6 -11 3 -24l-43 -159q-3 -13 -14 -19.5t-24 -2.5l-4 1q-4 1 -11.5 2.5l-17.5 3.5t-22.5 3.5t-26 3t-29 2.5t-29.5 1q-126 0 -226 -64t-150 -176h468q16 0 25 -12q10 -12 7 -26 l-24 -114q-5 -26 -32 -26h-488q-3 -37 0 -105h459q15 0 25 -12q9 -12 6 -27l-24 -112q-2 -11 -11 -18.5t-20 -7.5h-387q48 -117 149.5 -185.5t228.5 -68.5q18 0 36 1.5t33.5 3.5t29.5 4.5t24.5 5t18.5 4.5l12 3l5 2q13 5 26 -2q12 -7 15 -21z" />
+<glyph unicode="&#xf154;" horiz-adv-x="1024" d="M1020 399v-367q0 -14 -9 -23t-23 -9h-956q-14 0 -23 9t-9 23v150q0 13 9.5 22.5t22.5 9.5h97v383h-95q-14 0 -23 9.5t-9 22.5v131q0 14 9 23t23 9h95v223q0 171 123.5 282t314.5 111q185 0 335 -125q9 -8 10 -20.5t-7 -22.5l-103 -127q-9 -11 -22 -12q-13 -2 -23 7 q-5 5 -26 19t-69 32t-93 18q-85 0 -137 -47t-52 -123v-215h305q13 0 22.5 -9t9.5 -23v-131q0 -13 -9.5 -22.5t-22.5 -9.5h-305v-379h414v181q0 13 9 22.5t23 9.5h162q14 0 23 -9.5t9 -22.5z" />
+<glyph unicode="&#xf155;" horiz-adv-x="1024" d="M978 351q0 -153 -99.5 -263.5t-258.5 -136.5v-175q0 -14 -9 -23t-23 -9h-135q-13 0 -22.5 9.5t-9.5 22.5v175q-66 9 -127.5 31t-101.5 44.5t-74 48t-46.5 37.5t-17.5 18q-17 21 -2 41l103 135q7 10 23 12q15 2 24 -9l2 -2q113 -99 243 -125q37 -8 74 -8q81 0 142.5 43 t61.5 122q0 28 -15 53t-33.5 42t-58.5 37.5t-66 32t-80 32.5q-39 16 -61.5 25t-61.5 26.5t-62.5 31t-56.5 35.5t-53.5 42.5t-43.5 49t-35.5 58t-21 66.5t-8.5 78q0 138 98 242t255 134v180q0 13 9.5 22.5t22.5 9.5h135q14 0 23 -9t9 -23v-176q57 -6 110.5 -23t87 -33.5 t63.5 -37.5t39 -29t15 -14q17 -18 5 -38l-81 -146q-8 -15 -23 -16q-14 -3 -27 7q-3 3 -14.5 12t-39 26.5t-58.5 32t-74.5 26t-85.5 11.5q-95 0 -155 -43t-60 -111q0 -26 8.5 -48t29.5 -41.5t39.5 -33t56 -31t60.5 -27t70 -27.5q53 -20 81 -31.5t76 -35t75.5 -42.5t62 -50 t53 -63.5t31.5 -76.5t13 -94z" />
+<glyph unicode="&#xf156;" horiz-adv-x="898" d="M898 1066v-102q0 -14 -9 -23t-23 -9h-168q-23 -144 -129 -234t-276 -110q167 -178 459 -536q14 -16 4 -34q-8 -18 -29 -18h-195q-16 0 -25 12q-306 367 -498 571q-9 9 -9 22v127q0 13 9.5 22.5t22.5 9.5h112q132 0 212.5 43t102.5 125h-427q-14 0 -23 9t-9 23v102 q0 14 9 23t23 9h413q-57 113 -268 113h-145q-13 0 -22.5 9.5t-9.5 22.5v133q0 14 9 23t23 9h832q14 0 23 -9t9 -23v-102q0 -14 -9 -23t-23 -9h-233q47 -61 64 -144h171q14 0 23 -9t9 -23z" />
+<glyph unicode="&#xf157;" horiz-adv-x="1027" d="M603 0h-172q-13 0 -22.5 9t-9.5 23v330h-288q-13 0 -22.5 9t-9.5 23v103q0 13 9.5 22.5t22.5 9.5h288v85h-288q-13 0 -22.5 9t-9.5 23v104q0 13 9.5 22.5t22.5 9.5h214l-321 578q-8 16 0 32q10 16 28 16h194q19 0 29 -18l215 -425q19 -38 56 -125q10 24 30.5 68t27.5 61 l191 420q8 19 29 19h191q17 0 27 -16q9 -14 1 -31l-313 -579h215q13 0 22.5 -9.5t9.5 -22.5v-104q0 -14 -9.5 -23t-22.5 -9h-290v-85h290q13 0 22.5 -9.5t9.5 -22.5v-103q0 -14 -9.5 -23t-22.5 -9h-290v-330q0 -13 -9.5 -22.5t-22.5 -9.5z" />
+<glyph unicode="&#xf158;" horiz-adv-x="1280" d="M1043 971q0 100 -65 162t-171 62h-320v-448h320q106 0 171 62t65 162zM1280 971q0 -193 -126.5 -315t-326.5 -122h-340v-118h505q14 0 23 -9t9 -23v-128q0 -14 -9 -23t-23 -9h-505v-192q0 -14 -9.5 -23t-22.5 -9h-167q-14 0 -23 9t-9 23v192h-224q-14 0 -23 9t-9 23v128 q0 14 9 23t23 9h224v118h-224q-14 0 -23 9t-9 23v149q0 13 9 22.5t23 9.5h224v629q0 14 9 23t23 9h539q200 0 326.5 -122t126.5 -315z" />
+<glyph unicode="&#xf159;" horiz-adv-x="1792" d="M514 341l81 299h-159l75 -300q1 -1 1 -3t1 -3q0 1 0.5 3.5t0.5 3.5zM630 768l35 128h-292l32 -128h225zM822 768h139l-35 128h-70zM1271 340l78 300h-162l81 -299q0 -1 0.5 -3.5t1.5 -3.5q0 1 0.5 3t0.5 3zM1382 768l33 128h-297l34 -128h230zM1792 736v-64q0 -14 -9 -23 t-23 -9h-213l-164 -616q-7 -24 -31 -24h-159q-24 0 -31 24l-166 616h-209l-167 -616q-7 -24 -31 -24h-159q-11 0 -19.5 7t-10.5 17l-160 616h-208q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h175l-33 128h-142q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h109l-89 344q-5 15 5 28 q10 12 26 12h137q26 0 31 -24l90 -360h359l97 360q7 24 31 24h126q24 0 31 -24l98 -360h365l93 360q5 24 31 24h137q16 0 26 -12q10 -13 5 -28l-91 -344h111q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-145l-34 -128h179q14 0 23 -9t9 -23z" />
+<glyph unicode="&#xf15a;" horiz-adv-x="1280" d="M1167 896q18 -182 -131 -258q117 -28 175 -103t45 -214q-7 -71 -32.5 -125t-64.5 -89t-97 -58.5t-121.5 -34.5t-145.5 -15v-255h-154v251q-80 0 -122 1v-252h-154v255q-18 0 -54 0.5t-55 0.5h-200l31 183h111q50 0 58 51v402h16q-6 1 -16 1v287q-13 68 -89 68h-111v164 l212 -1q64 0 97 1v252h154v-247q82 2 122 2v245h154v-252q79 -7 140 -22.5t113 -45t82.5 -78t36.5 -114.5zM952 351q0 36 -15 64t-37 46t-57.5 30.5t-65.5 18.5t-74 9t-69 3t-64.5 -1t-47.5 -1v-338q8 0 37 -0.5t48 -0.5t53 1.5t58.5 4t57 8.5t55.5 14t47.5 21t39.5 30 t24.5 40t9.5 51zM881 827q0 33 -12.5 58.5t-30.5 42t-48 28t-55 16.5t-61.5 8t-58 2.5t-54 -1t-39.5 -0.5v-307q5 0 34.5 -0.5t46.5 0t50 2t55 5.5t51.5 11t48.5 18.5t37 27t27 38.5t9 51z" />
+<glyph unicode="&#xf15b;" horiz-adv-x="1280" d="M1280 768v-800q0 -40 -28 -68t-68 -28h-1088q-40 0 -68 28t-28 68v1344q0 40 28 68t68 28h544v-544q0 -40 28 -68t68 -28h544zM1277 896h-509v509q82 -15 132 -65l312 -312q50 -50 65 -132z" />
+<glyph unicode="&#xf15c;" horiz-adv-x="1280" d="M1024 160v64q0 14 -9 23t-23 9h-704q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h704q14 0 23 9t9 23zM1024 416v64q0 14 -9 23t-23 9h-704q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h704q14 0 23 9t9 23zM1280 768v-800q0 -40 -28 -68t-68 -28h-1088q-40 0 -68 28 t-28 68v1344q0 40 28 68t68 28h544v-544q0 -40 28 -68t68 -28h544zM1277 896h-509v509q82 -15 132 -65l312 -312q50 -50 65 -132z" />
+<glyph unicode="&#xf15d;" horiz-adv-x="1664" d="M1191 1128h177l-72 218l-12 47q-2 16 -2 20h-4l-3 -20q0 -1 -3.5 -18t-7.5 -29zM736 96q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23zM1572 -23 v-233h-584v90l369 529q12 18 21 27l11 9v3q-2 0 -6.5 -0.5t-7.5 -0.5q-12 -3 -30 -3h-232v-115h-120v229h567v-89l-369 -530q-6 -8 -21 -26l-11 -11v-2l14 2q9 2 30 2h248v119h121zM1661 874v-106h-288v106h75l-47 144h-243l-47 -144h75v-106h-287v106h70l230 662h162 l230 -662h70z" />
+<glyph unicode="&#xf15e;" horiz-adv-x="1664" d="M1191 104h177l-72 218l-12 47q-2 16 -2 20h-4l-3 -20q0 -1 -3.5 -18t-7.5 -29zM736 96q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23zM1661 -150 v-106h-288v106h75l-47 144h-243l-47 -144h75v-106h-287v106h70l230 662h162l230 -662h70zM1572 1001v-233h-584v90l369 529q12 18 21 27l11 9v3q-2 0 -6.5 -0.5t-7.5 -0.5q-12 -3 -30 -3h-232v-115h-120v229h567v-89l-369 -530q-6 -8 -21 -26l-11 -10v-3l14 3q9 1 30 1h248 v119h121z" />
+<glyph unicode="&#xf160;" horiz-adv-x="1792" d="M736 96q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23zM1792 -32v-192q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h832 q14 0 23 -9t9 -23zM1600 480v-192q0 -14 -9 -23t-23 -9h-640q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h640q14 0 23 -9t9 -23zM1408 992v-192q0 -14 -9 -23t-23 -9h-448q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h448q14 0 23 -9t9 -23zM1216 1504v-192q0 -14 -9 -23t-23 -9h-256 q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h256q14 0 23 -9t9 -23z" />
+<glyph unicode="&#xf161;" horiz-adv-x="1792" d="M1216 -32v-192q0 -14 -9 -23t-23 -9h-256q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h256q14 0 23 -9t9 -23zM736 96q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192 q14 0 23 -9t9 -23zM1408 480v-192q0 -14 -9 -23t-23 -9h-448q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h448q14 0 23 -9t9 -23zM1600 992v-192q0 -14 -9 -23t-23 -9h-640q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h640q14 0 23 -9t9 -23zM1792 1504v-192q0 -14 -9 -23t-23 -9h-832 q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h832q14 0 23 -9t9 -23z" />
+<glyph unicode="&#xf162;" d="M1346 223q0 63 -44 116t-103 53q-52 0 -83 -37t-31 -94t36.5 -95t104.5 -38q50 0 85 27t35 68zM736 96q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23 zM1486 165q0 -62 -13 -121.5t-41 -114t-68 -95.5t-98.5 -65.5t-127.5 -24.5q-62 0 -108 16q-24 8 -42 15l39 113q15 -7 31 -11q37 -13 75 -13q84 0 134.5 58.5t66.5 145.5h-2q-21 -23 -61.5 -37t-84.5 -14q-106 0 -173 71.5t-67 172.5q0 105 72 178t181 73q123 0 205 -94.5 t82 -252.5zM1456 882v-114h-469v114h167v432q0 7 0.5 19t0.5 17v16h-2l-7 -12q-8 -13 -26 -31l-62 -58l-82 86l192 185h123v-654h165z" />
+<glyph unicode="&#xf163;" d="M1346 1247q0 63 -44 116t-103 53q-52 0 -83 -37t-31 -94t36.5 -95t104.5 -38q50 0 85 27t35 68zM736 96q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9 t9 -23zM1456 -142v-114h-469v114h167v432q0 7 0.5 19t0.5 17v16h-2l-7 -12q-8 -13 -26 -31l-62 -58l-82 86l192 185h123v-654h165zM1486 1189q0 -62 -13 -121.5t-41 -114t-68 -95.5t-98.5 -65.5t-127.5 -24.5q-62 0 -108 16q-24 8 -42 15l39 113q15 -7 31 -11q37 -13 75 -13 q84 0 134.5 58.5t66.5 145.5h-2q-21 -23 -61.5 -37t-84.5 -14q-106 0 -173 71.5t-67 172.5q0 105 72 178t181 73q123 0 205 -94.5t82 -252.5z" />
+<glyph unicode="&#xf164;" horiz-adv-x="1664" d="M256 192q0 26 -19 45t-45 19q-27 0 -45.5 -19t-18.5 -45q0 -27 18.5 -45.5t45.5 -18.5q26 0 45 18.5t19 45.5zM416 704v-640q0 -26 -19 -45t-45 -19h-288q-26 0 -45 19t-19 45v640q0 26 19 45t45 19h288q26 0 45 -19t19 -45zM1600 704q0 -86 -55 -149q15 -44 15 -76 q3 -76 -43 -137q17 -56 0 -117q-15 -57 -54 -94q9 -112 -49 -181q-64 -76 -197 -78h-36h-76h-17q-66 0 -144 15.5t-121.5 29t-120.5 39.5q-123 43 -158 44q-26 1 -45 19.5t-19 44.5v641q0 25 18 43.5t43 20.5q24 2 76 59t101 121q68 87 101 120q18 18 31 48t17.5 48.5 t13.5 60.5q7 39 12.5 61t19.5 52t34 50q19 19 45 19q46 0 82.5 -10.5t60 -26t40 -40.5t24 -45t12 -50t5 -45t0.5 -39q0 -38 -9.5 -76t-19 -60t-27.5 -56q-3 -6 -10 -18t-11 -22t-8 -24h277q78 0 135 -57t57 -135z" />
+<glyph unicode="&#xf165;" horiz-adv-x="1664" d="M256 960q0 -26 -19 -45t-45 -19q-27 0 -45.5 19t-18.5 45q0 27 18.5 45.5t45.5 18.5q26 0 45 -18.5t19 -45.5zM416 448v640q0 26 -19 45t-45 19h-288q-26 0 -45 -19t-19 -45v-640q0 -26 19 -45t45 -19h288q26 0 45 19t19 45zM1545 597q55 -61 55 -149q-1 -78 -57.5 -135 t-134.5 -57h-277q4 -14 8 -24t11 -22t10 -18q18 -37 27 -57t19 -58.5t10 -76.5q0 -24 -0.5 -39t-5 -45t-12 -50t-24 -45t-40 -40.5t-60 -26t-82.5 -10.5q-26 0 -45 19q-20 20 -34 50t-19.5 52t-12.5 61q-9 42 -13.5 60.5t-17.5 48.5t-31 48q-33 33 -101 120q-49 64 -101 121 t-76 59q-25 2 -43 20.5t-18 43.5v641q0 26 19 44.5t45 19.5q35 1 158 44q77 26 120.5 39.5t121.5 29t144 15.5h17h76h36q133 -2 197 -78q58 -69 49 -181q39 -37 54 -94q17 -61 0 -117q46 -61 43 -137q0 -32 -15 -76z" />
+<glyph unicode="&#xf166;" d="M919 233v157q0 50 -29 50q-17 0 -33 -16v-224q16 -16 33 -16q29 0 29 49zM1103 355h66v34q0 51 -33 51t-33 -51v-34zM532 621v-70h-80v-423h-74v423h-78v70h232zM733 495v-367h-67v40q-39 -45 -76 -45q-33 0 -42 28q-6 16 -6 54v290h66v-270q0 -24 1 -26q1 -15 15 -15 q20 0 42 31v280h67zM985 384v-146q0 -52 -7 -73q-12 -42 -53 -42q-35 0 -68 41v-36h-67v493h67v-161q32 40 68 40q41 0 53 -42q7 -21 7 -74zM1236 255v-9q0 -29 -2 -43q-3 -22 -15 -40q-27 -40 -80 -40q-52 0 -81 38q-21 27 -21 86v129q0 59 20 86q29 38 80 38t78 -38 q21 -28 21 -86v-76h-133v-65q0 -51 34 -51q24 0 30 26q0 1 0.5 7t0.5 16.5v21.5h68zM785 1079v-156q0 -51 -32 -51t-32 51v156q0 52 32 52t32 -52zM1318 366q0 177 -19 260q-10 44 -43 73.5t-76 34.5q-136 15 -412 15q-275 0 -411 -15q-44 -5 -76.5 -34.5t-42.5 -73.5 q-20 -87 -20 -260q0 -176 20 -260q10 -43 42.5 -73t75.5 -35q137 -15 412 -15t412 15q43 5 75.5 35t42.5 73q20 84 20 260zM563 1017l90 296h-75l-51 -195l-53 195h-78l24 -69t23 -69q35 -103 46 -158v-201h74v201zM852 936v130q0 58 -21 87q-29 38 -78 38q-51 0 -78 -38 q-21 -29 -21 -87v-130q0 -58 21 -87q27 -38 78 -38q49 0 78 38q21 27 21 87zM1033 816h67v370h-67v-283q-22 -31 -42 -31q-15 0 -16 16q-1 2 -1 26v272h-67v-293q0 -37 6 -55q11 -27 43 -27q36 0 77 45v-40zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960 q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf167;" d="M971 292v-211q0 -67 -39 -67q-23 0 -45 22v301q22 22 45 22q39 0 39 -67zM1309 291v-46h-90v46q0 68 45 68t45 -68zM343 509h107v94h-312v-94h105v-569h100v569zM631 -60h89v494h-89v-378q-30 -42 -57 -42q-18 0 -21 21q-1 3 -1 35v364h-89v-391q0 -49 8 -73 q12 -37 58 -37q48 0 102 61v-54zM1060 88v197q0 73 -9 99q-17 56 -71 56q-50 0 -93 -54v217h-89v-663h89v48q45 -55 93 -55q54 0 71 55q9 27 9 100zM1398 98v13h-91q0 -51 -2 -61q-7 -36 -40 -36q-46 0 -46 69v87h179v103q0 79 -27 116q-39 51 -106 51q-68 0 -107 -51 q-28 -37 -28 -116v-173q0 -79 29 -116q39 -51 108 -51q72 0 108 53q18 27 21 54q2 9 2 58zM790 1011v210q0 69 -43 69t-43 -69v-210q0 -70 43 -70t43 70zM1509 260q0 -234 -26 -350q-14 -59 -58 -99t-102 -46q-184 -21 -555 -21t-555 21q-58 6 -102.5 46t-57.5 99 q-26 112 -26 350q0 234 26 350q14 59 58 99t103 47q183 20 554 20t555 -20q58 -7 102.5 -47t57.5 -99q26 -112 26 -350zM511 1536h102l-121 -399v-271h-100v271q-14 74 -61 212q-37 103 -65 187h106l71 -263zM881 1203v-175q0 -81 -28 -118q-37 -51 -106 -51q-67 0 -105 51 q-28 38 -28 118v175q0 80 28 117q38 51 105 51q69 0 106 -51q28 -37 28 -117zM1216 1365v-499h-91v55q-53 -62 -103 -62q-46 0 -59 37q-8 24 -8 75v394h91v-367q0 -33 1 -35q3 -22 21 -22q27 0 57 43v381h91z" />
+<glyph unicode="&#xf168;" horiz-adv-x="1408" d="M597 869q-10 -18 -257 -456q-27 -46 -65 -46h-239q-21 0 -31 17t0 36l253 448q1 0 0 1l-161 279q-12 22 -1 37q9 15 32 15h239q40 0 66 -45zM1403 1511q11 -16 0 -37l-528 -934v-1l336 -615q11 -20 1 -37q-10 -15 -32 -15h-239q-42 0 -66 45l-339 622q18 32 531 942 q25 45 64 45h241q22 0 31 -15z" />
+<glyph unicode="&#xf169;" d="M685 771q0 1 -126 222q-21 34 -52 34h-184q-18 0 -26 -11q-7 -12 1 -29l125 -216v-1l-196 -346q-9 -14 0 -28q8 -13 24 -13h185q31 0 50 36zM1309 1268q-7 12 -24 12h-187q-30 0 -49 -35l-411 -729q1 -2 262 -481q20 -35 52 -35h184q18 0 25 12q8 13 -1 28l-260 476v1 l409 723q8 16 0 28zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf16a;" horiz-adv-x="1792" d="M1280 640q0 37 -30 54l-512 320q-31 20 -65 2q-33 -18 -33 -56v-640q0 -38 33 -56q16 -8 31 -8q20 0 34 10l512 320q30 17 30 54zM1792 640q0 -96 -1 -150t-8.5 -136.5t-22.5 -147.5q-16 -73 -69 -123t-124 -58q-222 -25 -671 -25t-671 25q-71 8 -124.5 58t-69.5 123 q-14 65 -21.5 147.5t-8.5 136.5t-1 150t1 150t8.5 136.5t22.5 147.5q16 73 69 123t124 58q222 25 671 25t671 -25q71 -8 124.5 -58t69.5 -123q14 -65 21.5 -147.5t8.5 -136.5t1 -150z" />
+<glyph unicode="&#xf16b;" horiz-adv-x="1792" d="M402 829l494 -305l-342 -285l-490 319zM1388 274v-108l-490 -293v-1l-1 1l-1 -1v1l-489 293v108l147 -96l342 284v2l1 -1l1 1v-2l343 -284zM554 1418l342 -285l-494 -304l-338 270zM1390 829l338 -271l-489 -319l-343 285zM1239 1418l489 -319l-338 -270l-494 304z" />
+<glyph unicode="&#xf16c;" horiz-adv-x="1408" d="M928 135v-151l-707 -1v151zM1169 481v-701l-1 -35v-1h-1132l-35 1h-1v736h121v-618h928v618h120zM241 393l704 -65l-13 -150l-705 65zM309 709l683 -183l-39 -146l-683 183zM472 1058l609 -360l-77 -130l-609 360zM832 1389l398 -585l-124 -85l-399 584zM1285 1536 l121 -697l-149 -26l-121 697z" />
+<glyph unicode="&#xf16d;" d="M1362 110v648h-135q20 -63 20 -131q0 -126 -64 -232.5t-174 -168.5t-240 -62q-197 0 -337 135.5t-140 327.5q0 68 20 131h-141v-648q0 -26 17.5 -43.5t43.5 -17.5h1069q25 0 43 17.5t18 43.5zM1078 643q0 124 -90.5 211.5t-218.5 87.5q-127 0 -217.5 -87.5t-90.5 -211.5 t90.5 -211.5t217.5 -87.5q128 0 218.5 87.5t90.5 211.5zM1362 1003v165q0 28 -20 48.5t-49 20.5h-174q-29 0 -49 -20.5t-20 -48.5v-165q0 -29 20 -49t49 -20h174q29 0 49 20t20 49zM1536 1211v-1142q0 -81 -58 -139t-139 -58h-1142q-81 0 -139 58t-58 139v1142q0 81 58 139 t139 58h1142q81 0 139 -58t58 -139z" />
+<glyph unicode="&#xf16e;" d="M1248 1408q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960zM698 640q0 88 -62 150t-150 62t-150 -62t-62 -150t62 -150t150 -62t150 62t62 150zM1262 640q0 88 -62 150 t-150 62t-150 -62t-62 -150t62 -150t150 -62t150 62t62 150z" />
+<glyph unicode="&#xf170;" d="M768 914l201 -306h-402zM1133 384h94l-459 691l-459 -691h94l104 160h522zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf171;" horiz-adv-x="1408" d="M815 677q8 -63 -50.5 -101t-111.5 -6q-39 17 -53.5 58t-0.5 82t52 58q36 18 72.5 12t64 -35.5t27.5 -67.5zM926 698q-14 107 -113 164t-197 13q-63 -28 -100.5 -88.5t-34.5 -129.5q4 -91 77.5 -155t165.5 -56q91 8 152 84t50 168zM1165 1240q-20 27 -56 44.5t-58 22 t-71 12.5q-291 47 -566 -2q-43 -7 -66 -12t-55 -22t-50 -43q30 -28 76 -45.5t73.5 -22t87.5 -11.5q228 -29 448 -1q63 8 89.5 12t72.5 21.5t75 46.5zM1222 205q-8 -26 -15.5 -76.5t-14 -84t-28.5 -70t-58 -56.5q-86 -48 -189.5 -71.5t-202 -22t-201.5 18.5q-46 8 -81.5 18 t-76.5 27t-73 43.5t-52 61.5q-25 96 -57 292l6 16l18 9q223 -148 506.5 -148t507.5 148q21 -6 24 -23t-5 -45t-8 -37zM1403 1166q-26 -167 -111 -655q-5 -30 -27 -56t-43.5 -40t-54.5 -31q-252 -126 -610 -88q-248 27 -394 139q-15 12 -25.5 26.5t-17 35t-9 34t-6 39.5 t-5.5 35q-9 50 -26.5 150t-28 161.5t-23.5 147.5t-22 158q3 26 17.5 48.5t31.5 37.5t45 30t46 22.5t48 18.5q125 46 313 64q379 37 676 -50q155 -46 215 -122q16 -20 16.5 -51t-5.5 -54z" />
+<glyph unicode="&#xf172;" d="M848 666q0 43 -41 66t-77 1q-43 -20 -42.5 -72.5t43.5 -70.5q39 -23 81 4t36 72zM928 682q8 -66 -36 -121t-110 -61t-119 40t-56 113q-2 49 25.5 93t72.5 64q70 31 141.5 -10t81.5 -118zM1100 1073q-20 -21 -53.5 -34t-53 -16t-63.5 -8q-155 -20 -324 0q-44 6 -63 9.5 t-52.5 16t-54.5 32.5q13 19 36 31t40 15.5t47 8.5q198 35 408 1q33 -5 51 -8.5t43 -16t39 -31.5zM1142 327q0 7 5.5 26.5t3 32t-17.5 16.5q-161 -106 -365 -106t-366 106l-12 -6l-5 -12q26 -154 41 -210q47 -81 204 -108q249 -46 428 53q34 19 49 51.5t22.5 85.5t12.5 71z M1272 1020q9 53 -8 75q-43 55 -155 88q-216 63 -487 36q-132 -12 -226 -46q-38 -15 -59.5 -25t-47 -34t-29.5 -54q8 -68 19 -138t29 -171t24 -137q1 -5 5 -31t7 -36t12 -27t22 -28q105 -80 284 -100q259 -28 440 63q24 13 39.5 23t31 29t19.5 40q48 267 80 473zM1536 1120 v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf173;" horiz-adv-x="1024" d="M390 1408h219v-388h364v-241h-364v-394q0 -136 14 -172q13 -37 52 -60q50 -31 117 -31q117 0 232 76v-242q-102 -48 -178 -65q-77 -19 -173 -19q-105 0 -186 27q-78 25 -138 75q-58 51 -79 105q-22 54 -22 161v539h-170v217q91 30 155 84q64 55 103 132q39 78 54 196z " />
+<glyph unicode="&#xf174;" d="M1123 127v181q-88 -56 -174 -56q-51 0 -88 23q-29 17 -39 45q-11 30 -11 129v295h274v181h-274v291h-164q-11 -90 -40 -147t-78 -99q-48 -40 -116 -63v-163h127v-404q0 -78 17 -121q17 -42 59 -78q43 -37 104 -57q62 -20 140 -20q67 0 129 14q57 13 134 49zM1536 1120 v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf175;" horiz-adv-x="768" d="M765 237q8 -19 -5 -35l-350 -384q-10 -10 -23 -10q-14 0 -24 10l-355 384q-13 16 -5 35q9 19 29 19h224v1248q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1248h224q21 0 29 -19z" />
+<glyph unicode="&#xf176;" horiz-adv-x="768" d="M765 1043q-9 -19 -29 -19h-224v-1248q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v1248h-224q-21 0 -29 19t5 35l350 384q10 10 23 10q14 0 24 -10l355 -384q13 -16 5 -35z" />
+<glyph unicode="&#xf177;" horiz-adv-x="1792" d="M1792 736v-192q0 -14 -9 -23t-23 -9h-1248v-224q0 -21 -19 -29t-35 5l-384 350q-10 10 -10 23q0 14 10 24l384 354q16 14 35 6q19 -9 19 -29v-224h1248q14 0 23 -9t9 -23z" />
+<glyph unicode="&#xf178;" horiz-adv-x="1792" d="M1728 643q0 -14 -10 -24l-384 -354q-16 -14 -35 -6q-19 9 -19 29v224h-1248q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h1248v224q0 21 19 29t35 -5l384 -350q10 -10 10 -23z" />
+<glyph unicode="&#xf179;" horiz-adv-x="1408" d="M1393 321q-39 -125 -123 -250q-129 -196 -257 -196q-49 0 -140 32q-86 32 -151 32q-61 0 -142 -33q-81 -34 -132 -34q-152 0 -301 259q-147 261 -147 503q0 228 113 374q112 144 284 144q72 0 177 -30q104 -30 138 -30q45 0 143 34q102 34 173 34q119 0 213 -65 q52 -36 104 -100q-79 -67 -114 -118q-65 -94 -65 -207q0 -124 69 -223t158 -126zM1017 1494q0 -61 -29 -136q-30 -75 -93 -138q-54 -54 -108 -72q-37 -11 -104 -17q3 149 78 257q74 107 250 148q1 -3 2.5 -11t2.5 -11q0 -4 0.5 -10t0.5 -10z" />
+<glyph unicode="&#xf17a;" horiz-adv-x="1664" d="M682 530v-651l-682 94v557h682zM682 1273v-659h-682v565zM1664 530v-786l-907 125v661h907zM1664 1408v-794h-907v669z" />
+<glyph unicode="&#xf17b;" horiz-adv-x="1408" d="M493 1053q16 0 27.5 11.5t11.5 27.5t-11.5 27.5t-27.5 11.5t-27 -11.5t-11 -27.5t11 -27.5t27 -11.5zM915 1053q16 0 27 11.5t11 27.5t-11 27.5t-27 11.5t-27.5 -11.5t-11.5 -27.5t11.5 -27.5t27.5 -11.5zM103 869q42 0 72 -30t30 -72v-430q0 -43 -29.5 -73t-72.5 -30 t-73 30t-30 73v430q0 42 30 72t73 30zM1163 850v-666q0 -46 -32 -78t-77 -32h-75v-227q0 -43 -30 -73t-73 -30t-73 30t-30 73v227h-138v-227q0 -43 -30 -73t-73 -30q-42 0 -72 30t-30 73l-1 227h-74q-46 0 -78 32t-32 78v666h918zM931 1255q107 -55 171 -153.5t64 -215.5 h-925q0 117 64 215.5t172 153.5l-71 131q-7 13 5 20q13 6 20 -6l72 -132q95 42 201 42t201 -42l72 132q7 12 20 6q12 -7 5 -20zM1408 767v-430q0 -43 -30 -73t-73 -30q-42 0 -72 30t-30 73v430q0 43 30 72.5t72 29.5q43 0 73 -29.5t30 -72.5z" />
+<glyph unicode="&#xf17c;" d="M663 1125q-11 -1 -15.5 -10.5t-8.5 -9.5q-5 -1 -5 5q0 12 19 15h10zM750 1111q-4 -1 -11.5 6.5t-17.5 4.5q24 11 32 -2q3 -6 -3 -9zM399 684q-4 1 -6 -3t-4.5 -12.5t-5.5 -13.5t-10 -13q-7 -10 -1 -12q4 -1 12.5 7t12.5 18q1 3 2 7t2 6t1.5 4.5t0.5 4v3t-1 2.5t-3 2z M1254 325q0 18 -55 42q4 15 7.5 27.5t5 26t3 21.5t0.5 22.5t-1 19.5t-3.5 22t-4 20.5t-5 25t-5.5 26.5q-10 48 -47 103t-72 75q24 -20 57 -83q87 -162 54 -278q-11 -40 -50 -42q-31 -4 -38.5 18.5t-8 83.5t-11.5 107q-9 39 -19.5 69t-19.5 45.5t-15.5 24.5t-13 15t-7.5 7 q-14 62 -31 103t-29.5 56t-23.5 33t-15 40q-4 21 6 53.5t4.5 49.5t-44.5 25q-15 3 -44.5 18t-35.5 16q-8 1 -11 26t8 51t36 27q37 3 51 -30t4 -58q-11 -19 -2 -26.5t30 -0.5q13 4 13 36v37q-5 30 -13.5 50t-21 30.5t-23.5 15t-27 7.5q-107 -8 -89 -134q0 -15 -1 -15 q-9 9 -29.5 10.5t-33 -0.5t-15.5 5q1 57 -16 90t-45 34q-27 1 -41.5 -27.5t-16.5 -59.5q-1 -15 3.5 -37t13 -37.5t15.5 -13.5q10 3 16 14q4 9 -7 8q-7 0 -15.5 14.5t-9.5 33.5q-1 22 9 37t34 14q17 0 27 -21t9.5 -39t-1.5 -22q-22 -15 -31 -29q-8 -12 -27.5 -23.5 t-20.5 -12.5q-13 -14 -15.5 -27t7.5 -18q14 -8 25 -19.5t16 -19t18.5 -13t35.5 -6.5q47 -2 102 15q2 1 23 7t34.5 10.5t29.5 13t21 17.5q9 14 20 8q5 -3 6.5 -8.5t-3 -12t-16.5 -9.5q-20 -6 -56.5 -21.5t-45.5 -19.5q-44 -19 -70 -23q-25 -5 -79 2q-10 2 -9 -2t17 -19 q25 -23 67 -22q17 1 36 7t36 14t33.5 17.5t30 17t24.5 12t17.5 2.5t8.5 -11q0 -2 -1 -4.5t-4 -5t-6 -4.5t-8.5 -5t-9 -4.5t-10 -5t-9.5 -4.5q-28 -14 -67.5 -44t-66.5 -43t-49 -1q-21 11 -63 73q-22 31 -25 22q-1 -3 -1 -10q0 -25 -15 -56.5t-29.5 -55.5t-21 -58t11.5 -63 q-23 -6 -62.5 -90t-47.5 -141q-2 -18 -1.5 -69t-5.5 -59q-8 -24 -29 -3q-32 31 -36 94q-2 28 4 56q4 19 -1 18l-4 -5q-36 -65 10 -166q5 -12 25 -28t24 -20q20 -23 104 -90.5t93 -76.5q16 -15 17.5 -38t-14 -43t-45.5 -23q8 -15 29 -44.5t28 -54t7 -70.5q46 24 7 92 q-4 8 -10.5 16t-9.5 12t-2 6q3 5 13 9.5t20 -2.5q46 -52 166 -36q133 15 177 87q23 38 34 30q12 -6 10 -52q-1 -25 -23 -92q-9 -23 -6 -37.5t24 -15.5q3 19 14.5 77t13.5 90q2 21 -6.5 73.5t-7.5 97t23 70.5q15 18 51 18q1 37 34.5 53t72.5 10.5t60 -22.5zM626 1152 q3 17 -2.5 30t-11.5 15q-9 2 -9 -7q2 -5 5 -6q10 0 7 -15q-3 -20 8 -20q3 0 3 3zM1045 955q-2 8 -6.5 11.5t-13 5t-14.5 5.5q-5 3 -9.5 8t-7 8t-5.5 6.5t-4 4t-4 -1.5q-14 -16 7 -43.5t39 -31.5q9 -1 14.5 8t3.5 20zM867 1168q0 11 -5 19.5t-11 12.5t-9 3q-14 -1 -7 -7l4 -2 q14 -4 18 -31q0 -3 8 2zM921 1401q0 2 -2.5 5t-9 7t-9.5 6q-15 15 -24 15q-9 -1 -11.5 -7.5t-1 -13t-0.5 -12.5q-1 -4 -6 -10.5t-6 -9t3 -8.5q4 -3 8 0t11 9t15 9q1 1 9 1t15 2t9 7zM1486 60q20 -12 31 -24.5t12 -24t-2.5 -22.5t-15.5 -22t-23.5 -19.5t-30 -18.5 t-31.5 -16.5t-32 -15.5t-27 -13q-38 -19 -85.5 -56t-75.5 -64q-17 -16 -68 -19.5t-89 14.5q-18 9 -29.5 23.5t-16.5 25.5t-22 19.5t-47 9.5q-44 1 -130 1q-19 0 -57 -1.5t-58 -2.5q-44 -1 -79.5 -15t-53.5 -30t-43.5 -28.5t-53.5 -11.5q-29 1 -111 31t-146 43q-19 4 -51 9.5 t-50 9t-39.5 9.5t-33.5 14.5t-17 19.5q-10 23 7 66.5t18 54.5q1 16 -4 40t-10 42.5t-4.5 36.5t10.5 27q14 12 57 14t60 12q30 18 42 35t12 51q21 -73 -32 -106q-32 -20 -83 -15q-34 3 -43 -10q-13 -15 5 -57q2 -6 8 -18t8.5 -18t4.5 -17t1 -22q0 -15 -17 -49t-14 -48 q3 -17 37 -26q20 -6 84.5 -18.5t99.5 -20.5q24 -6 74 -22t82.5 -23t55.5 -4q43 6 64.5 28t23 48t-7.5 58.5t-19 52t-20 36.5q-121 190 -169 242q-68 74 -113 40q-11 -9 -15 15q-3 16 -2 38q1 29 10 52t24 47t22 42q8 21 26.5 72t29.5 78t30 61t39 54q110 143 124 195 q-12 112 -16 310q-2 90 24 151.5t106 104.5q39 21 104 21q53 1 106 -13.5t89 -41.5q57 -42 91.5 -121.5t29.5 -147.5q-5 -95 30 -214q34 -113 133 -218q55 -59 99.5 -163t59.5 -191q8 -49 5 -84.5t-12 -55.5t-20 -22q-10 -2 -23.5 -19t-27 -35.5t-40.5 -33.5t-61 -14 q-18 1 -31.5 5t-22.5 13.5t-13.5 
15.5t-11.5 20.5t-9 19.5q-22 37 -41 30t-28 -49t7 -97q20 -70 1 -195q-10 -65 18 -100.5t73 -33t85 35.5q59 49 89.5 66.5t103.5 42.5q53 18 77 36.5t18.5 34.5t-25 28.5t-51.5 23.5q-33 11 -49.5 48t-15 72.5t15.5 47.5q1 -31 8 -56.5 t14.5 -40.5t20.5 -28.5t21 -19t21.5 -13t16.5 -9.5z" />
+<glyph unicode="&#xf17d;" d="M1024 36q-42 241 -140 498h-2l-2 -1q-16 -6 -43 -16.5t-101 -49t-137 -82t-131 -114.5t-103 -148l-15 11q184 -150 418 -150q132 0 256 52zM839 643q-21 49 -53 111q-311 -93 -673 -93q-1 -7 -1 -21q0 -124 44 -236.5t124 -201.5q50 89 123.5 166.5t142.5 124.5t130.5 81 t99.5 48l37 13q4 1 13 3.5t13 4.5zM732 855q-120 213 -244 378q-138 -65 -234 -186t-128 -272q302 0 606 80zM1416 536q-210 60 -409 29q87 -239 128 -469q111 75 185 189.5t96 250.5zM611 1277q-1 0 -2 -1q1 1 2 1zM1201 1132q-185 164 -433 164q-76 0 -155 -19 q131 -170 246 -382q69 26 130 60.5t96.5 61.5t65.5 57t37.5 40.5zM1424 647q-3 232 -149 410l-1 -1q-9 -12 -19 -24.5t-43.5 -44.5t-71 -60.5t-100 -65t-131.5 -64.5q25 -53 44 -95q2 -6 6.5 -17.5t7.5 -16.5q36 5 74.5 7t73.5 2t69 -1.5t64 -4t56.5 -5.5t48 -6.5t36.5 -6 t25 -4.5zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf17e;" d="M1173 473q0 50 -19.5 91.5t-48.5 68.5t-73 49t-82.5 34t-87.5 23l-104 24q-30 7 -44 10.5t-35 11.5t-30 16t-16.5 21t-7.5 30q0 77 144 77q43 0 77 -12t54 -28.5t38 -33.5t40 -29t48 -12q47 0 75.5 32t28.5 77q0 55 -56 99.5t-142 67.5t-182 23q-68 0 -132 -15.5 t-119.5 -47t-89 -87t-33.5 -128.5q0 -61 19 -106.5t56 -75.5t80 -48.5t103 -32.5l146 -36q90 -22 112 -36q32 -20 32 -60q0 -39 -40 -64.5t-105 -25.5q-51 0 -91.5 16t-65 38.5t-45.5 45t-46 38.5t-54 16q-50 0 -75.5 -30t-25.5 -75q0 -92 122 -157.5t291 -65.5 q73 0 140 18.5t122.5 53.5t88.5 93.5t33 131.5zM1536 256q0 -159 -112.5 -271.5t-271.5 -112.5q-130 0 -234 80q-77 -16 -150 -16q-143 0 -273.5 55.5t-225 150t-150 225t-55.5 273.5q0 73 16 150q-80 104 -80 234q0 159 112.5 271.5t271.5 112.5q130 0 234 -80 q77 16 150 16q143 0 273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -73 -16 -150q80 -104 80 -234z" />
+<glyph unicode="&#xf180;" horiz-adv-x="1664" d="M1483 512l-587 -587q-52 -53 -127.5 -53t-128.5 53l-587 587q-53 53 -53 128t53 128l587 587q53 53 128 53t128 -53l265 -265l-398 -399l-188 188q-42 42 -99 42q-59 0 -100 -41l-120 -121q-42 -40 -42 -99q0 -58 42 -100l406 -408q30 -28 67 -37l6 -4h28q60 0 99 41 l619 619l2 -3q53 -53 53 -128t-53 -128zM1406 1138l120 -120q14 -15 14 -36t-14 -36l-730 -730q-17 -15 -37 -15v0q-4 0 -6 1q-18 2 -30 14l-407 408q-14 15 -14 36t14 35l121 120q13 15 35 15t36 -15l252 -252l574 575q15 15 36 15t36 -15z" />
+<glyph unicode="&#xf181;" d="M704 192v1024q0 14 -9 23t-23 9h-480q-14 0 -23 -9t-9 -23v-1024q0 -14 9 -23t23 -9h480q14 0 23 9t9 23zM1376 576v640q0 14 -9 23t-23 9h-480q-14 0 -23 -9t-9 -23v-640q0 -14 9 -23t23 -9h480q14 0 23 9t9 23zM1536 1344v-1408q0 -26 -19 -45t-45 -19h-1408 q-26 0 -45 19t-19 45v1408q0 26 19 45t45 19h1408q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf182;" horiz-adv-x="1280" d="M1280 480q0 -40 -28 -68t-68 -28q-51 0 -80 43l-227 341h-45v-132l247 -411q9 -15 9 -33q0 -26 -19 -45t-45 -19h-192v-272q0 -46 -33 -79t-79 -33h-160q-46 0 -79 33t-33 79v272h-192q-26 0 -45 19t-19 45q0 18 9 33l247 411v132h-45l-227 -341q-29 -43 -80 -43 q-40 0 -68 28t-28 68q0 29 16 53l256 384q73 107 176 107h384q103 0 176 -107l256 -384q16 -24 16 -53zM864 1280q0 -93 -65.5 -158.5t-158.5 -65.5t-158.5 65.5t-65.5 158.5t65.5 158.5t158.5 65.5t158.5 -65.5t65.5 -158.5z" />
+<glyph unicode="&#xf183;" horiz-adv-x="1024" d="M1024 832v-416q0 -40 -28 -68t-68 -28t-68 28t-28 68v352h-64v-912q0 -46 -33 -79t-79 -33t-79 33t-33 79v464h-64v-464q0 -46 -33 -79t-79 -33t-79 33t-33 79v912h-64v-352q0 -40 -28 -68t-68 -28t-68 28t-28 68v416q0 80 56 136t136 56h640q80 0 136 -56t56 -136z M736 1280q0 -93 -65.5 -158.5t-158.5 -65.5t-158.5 65.5t-65.5 158.5t65.5 158.5t158.5 65.5t158.5 -65.5t65.5 -158.5z" />
+<glyph unicode="&#xf184;" d="M773 234l350 473q16 22 24.5 59t-6 85t-61.5 79q-40 26 -83 25.5t-73.5 -17.5t-54.5 -45q-36 -40 -96 -40q-59 0 -95 40q-24 28 -54.5 45t-73.5 17.5t-84 -25.5q-46 -31 -60.5 -79t-6 -85t24.5 -59zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103 t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf185;" horiz-adv-x="1792" d="M1472 640q0 117 -45.5 223.5t-123 184t-184 123t-223.5 45.5t-223.5 -45.5t-184 -123t-123 -184t-45.5 -223.5t45.5 -223.5t123 -184t184 -123t223.5 -45.5t223.5 45.5t184 123t123 184t45.5 223.5zM1748 363q-4 -15 -20 -20l-292 -96v-306q0 -16 -13 -26q-15 -10 -29 -4 l-292 94l-180 -248q-10 -13 -26 -13t-26 13l-180 248l-292 -94q-14 -6 -29 4q-13 10 -13 26v306l-292 96q-16 5 -20 20q-5 17 4 29l180 248l-180 248q-9 13 -4 29q4 15 20 20l292 96v306q0 16 13 26q15 10 29 4l292 -94l180 248q9 12 26 12t26 -12l180 -248l292 94 q14 6 29 -4q13 -10 13 -26v-306l292 -96q16 -5 20 -20q5 -16 -4 -29l-180 -248l180 -248q9 -12 4 -29z" />
+<glyph unicode="&#xf186;" d="M1262 233q-54 -9 -110 -9q-182 0 -337 90t-245 245t-90 337q0 192 104 357q-201 -60 -328.5 -229t-127.5 -384q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51q144 0 273.5 61.5t220.5 171.5zM1465 318q-94 -203 -283.5 -324.5t-413.5 -121.5q-156 0 -298 61 t-245 164t-164 245t-61 298q0 153 57.5 292.5t156 241.5t235.5 164.5t290 68.5q44 2 61 -39q18 -41 -15 -72q-86 -78 -131.5 -181.5t-45.5 -218.5q0 -148 73 -273t198 -198t273 -73q118 0 228 51q41 18 72 -13q14 -14 17.5 -34t-4.5 -38z" />
+<glyph unicode="&#xf187;" horiz-adv-x="1792" d="M1088 704q0 26 -19 45t-45 19h-256q-26 0 -45 -19t-19 -45t19 -45t45 -19h256q26 0 45 19t19 45zM1664 896v-960q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45v960q0 26 19 45t45 19h1408q26 0 45 -19t19 -45zM1728 1344v-256q0 -26 -19 -45t-45 -19h-1536 q-26 0 -45 19t-19 45v256q0 26 19 45t45 19h1536q26 0 45 -19t19 -45z" />
+<glyph unicode="&#xf188;" horiz-adv-x="1664" d="M1632 576q0 -26 -19 -45t-45 -19h-224q0 -171 -67 -290l208 -209q19 -19 19 -45t-19 -45q-18 -19 -45 -19t-45 19l-198 197q-5 -5 -15 -13t-42 -28.5t-65 -36.5t-82 -29t-97 -13v896h-128v-896q-51 0 -101.5 13.5t-87 33t-66 39t-43.5 32.5l-15 14l-183 -207 q-20 -21 -48 -21q-24 0 -43 16q-19 18 -20.5 44.5t15.5 46.5l202 227q-58 114 -58 274h-224q-26 0 -45 19t-19 45t19 45t45 19h224v294l-173 173q-19 19 -19 45t19 45t45 19t45 -19l173 -173h844l173 173q19 19 45 19t45 -19t19 -45t-19 -45l-173 -173v-294h224q26 0 45 -19 t19 -45zM1152 1152h-640q0 133 93.5 226.5t226.5 93.5t226.5 -93.5t93.5 -226.5z" />
+<glyph unicode="&#xf189;" horiz-adv-x="1920" d="M1917 1016q23 -64 -150 -294q-24 -32 -65 -85q-78 -100 -90 -131q-17 -41 14 -81q17 -21 81 -82h1l1 -1l1 -1l2 -2q141 -131 191 -221q3 -5 6.5 -12.5t7 -26.5t-0.5 -34t-25 -27.5t-59 -12.5l-256 -4q-24 -5 -56 5t-52 22l-20 12q-30 21 -70 64t-68.5 77.5t-61 58 t-56.5 15.5q-3 -1 -8 -3.5t-17 -14.5t-21.5 -29.5t-17 -52t-6.5 -77.5q0 -15 -3.5 -27.5t-7.5 -18.5l-4 -5q-18 -19 -53 -22h-115q-71 -4 -146 16.5t-131.5 53t-103 66t-70.5 57.5l-25 24q-10 10 -27.5 30t-71.5 91t-106 151t-122.5 211t-130.5 272q-6 16 -6 27t3 16l4 6 q15 19 57 19l274 2q12 -2 23 -6.5t16 -8.5l5 -3q16 -11 24 -32q20 -50 46 -103.5t41 -81.5l16 -29q29 -60 56 -104t48.5 -68.5t41.5 -38.5t34 -14t27 5q2 1 5 5t12 22t13.5 47t9.5 81t0 125q-2 40 -9 73t-14 46l-6 12q-25 34 -85 43q-13 2 5 24q17 19 38 30q53 26 239 24 q82 -1 135 -13q20 -5 33.5 -13.5t20.5 -24t10.5 -32t3.5 -45.5t-1 -55t-2.5 -70.5t-1.5 -82.5q0 -11 -1 -42t-0.5 -48t3.5 -40.5t11.5 -39t22.5 -24.5q8 -2 17 -4t26 11t38 34.5t52 67t68 107.5q60 104 107 225q4 10 10 17.5t11 10.5l4 3l5 2.5t13 3t20 0.5l288 2 q39 5 64 -2.5t31 -16.5z" />
+<glyph unicode="&#xf18a;" horiz-adv-x="1792" d="M675 252q21 34 11 69t-45 50q-34 14 -73 1t-60 -46q-22 -34 -13 -68.5t43 -50.5t74.5 -2.5t62.5 47.5zM769 373q8 13 3.5 26.5t-17.5 18.5q-14 5 -28.5 -0.5t-21.5 -18.5q-17 -31 13 -45q14 -5 29 0.5t22 18.5zM943 266q-45 -102 -158 -150t-224 -12 q-107 34 -147.5 126.5t6.5 187.5q47 93 151.5 139t210.5 19q111 -29 158.5 -119.5t2.5 -190.5zM1255 426q-9 96 -89 170t-208.5 109t-274.5 21q-223 -23 -369.5 -141.5t-132.5 -264.5q9 -96 89 -170t208.5 -109t274.5 -21q223 23 369.5 141.5t132.5 264.5zM1563 422 q0 -68 -37 -139.5t-109 -137t-168.5 -117.5t-226 -83t-270.5 -31t-275 33.5t-240.5 93t-171.5 151t-65 199.5q0 115 69.5 245t197.5 258q169 169 341.5 236t246.5 -7q65 -64 20 -209q-4 -14 -1 -20t10 -7t14.5 0.5t13.5 3.5l6 2q139 59 246 59t153 -61q45 -63 0 -178 q-2 -13 -4.5 -20t4.5 -12.5t12 -7.5t17 -6q57 -18 103 -47t80 -81.5t34 -116.5zM1489 1046q42 -47 54.5 -108.5t-6.5 -117.5q-8 -23 -29.5 -34t-44.5 -4q-23 8 -34 29.5t-4 44.5q20 63 -24 111t-107 35q-24 -5 -45 8t-25 37q-5 24 8 44.5t37 25.5q60 13 119 -5.5t101 -65.5z M1670 1209q87 -96 112.5 -222.5t-13.5 -241.5q-9 -27 -34 -40t-52 -4t-40 34t-5 52q28 82 10 172t-80 158q-62 69 -148 95.5t-173 8.5q-28 -6 -52 9.5t-30 43.5t9.5 51.5t43.5 29.5q123 26 244 -11.5t208 -134.5z" />
+<glyph unicode="&#xf18b;" d="M1133 -34q-171 -94 -368 -94q-196 0 -367 94q138 87 235.5 211t131.5 268q35 -144 132.5 -268t235.5 -211zM638 1394v-485q0 -252 -126.5 -459.5t-330.5 -306.5q-181 215 -181 495q0 187 83.5 349.5t229.5 269.5t325 137zM1536 638q0 -280 -181 -495 q-204 99 -330.5 306.5t-126.5 459.5v485q179 -30 325 -137t229.5 -269.5t83.5 -349.5z" />
+<glyph unicode="&#xf18c;" horiz-adv-x="1408" d="M1402 433q-32 -80 -76 -138t-91 -88.5t-99 -46.5t-101.5 -14.5t-96.5 8.5t-86.5 22t-69.5 27.5t-46 22.5l-17 10q-113 -228 -289.5 -359.5t-384.5 -132.5q-19 0 -32 13t-13 32t13 31.5t32 12.5q173 1 322.5 107.5t251.5 294.5q-36 -14 -72 -23t-83 -13t-91 2.5t-93 28.5 t-92 59t-84.5 100t-74.5 146q114 47 214 57t167.5 -7.5t124.5 -56.5t88.5 -77t56.5 -82q53 131 79 291q-7 -1 -18 -2.5t-46.5 -2.5t-69.5 0.5t-81.5 10t-88.5 23t-84 42.5t-75 65t-54.5 94.5t-28.5 127.5q70 28 133.5 36.5t112.5 -1t92 -30t73.5 -50t56 -61t42 -63t27.5 -56 t16 -39.5l4 -16q12 122 12 195q-8 6 -21.5 16t-49 44.5t-63.5 71.5t-54 93t-33 112.5t12 127t70 138.5q73 -25 127.5 -61.5t84.5 -76.5t48 -85t20.5 -89t-0.5 -85.5t-13 -76.5t-19 -62t-17 -42l-7 -15q1 -5 1 -50.5t-1 -71.5q3 7 10 18.5t30.5 43t50.5 58t71 55.5t91.5 44.5 t112 14.5t132.5 -24q-2 -78 -21.5 -141.5t-50 -104.5t-69.5 -71.5t-81.5 -45.5t-84.5 -24t-80 -9.5t-67.5 1t-46.5 4.5l-17 3q-23 -147 -73 -283q6 7 18 18.5t49.5 41t77.5 52.5t99.5 42t117.5 20t129 -23.5t137 -77.5z" />
+<glyph unicode="&#xf18d;" horiz-adv-x="1280" d="M1259 283v-66q0 -85 -57.5 -144.5t-138.5 -59.5h-57l-260 -269v269h-529q-81 0 -138.5 59.5t-57.5 144.5v66h1238zM1259 609v-255h-1238v255h1238zM1259 937v-255h-1238v255h1238zM1259 1077v-67h-1238v67q0 84 57.5 143.5t138.5 59.5h846q81 0 138.5 -59.5t57.5 -143.5z " />
+<glyph unicode="&#xf18e;" d="M1152 640q0 -14 -9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v192h-352q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h352v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23zM1312 640q0 148 -73 273t-198 198t-273 73t-273 -73t-198 -198 t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf190;" d="M1152 736v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-352v-192q0 -14 -9 -23t-23 -9q-12 0 -24 10l-319 319q-9 9 -9 23t9 23l320 320q9 9 23 9q13 0 22.5 -9.5t9.5 -22.5v-192h352q13 0 22.5 -9.5t9.5 -22.5zM1312 640q0 148 -73 273t-198 198t-273 73t-273 -73t-198 -198 t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf191;" d="M1024 960v-640q0 -26 -19 -45t-45 -19q-20 0 -37 12l-448 320q-27 19 -27 52t27 52l448 320q17 12 37 12q26 0 45 -19t19 -45zM1280 160v960q0 13 -9.5 22.5t-22.5 9.5h-960q-13 0 -22.5 -9.5t-9.5 -22.5v-960q0 -13 9.5 -22.5t22.5 -9.5h960q13 0 22.5 9.5t9.5 22.5z M1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf192;" d="M1024 640q0 -106 -75 -181t-181 -75t-181 75t-75 181t75 181t181 75t181 -75t75 -181zM768 1184q-148 0 -273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73zM1536 640q0 -209 -103 -385.5t-279.5 -279.5 t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+<glyph unicode="&#xf193;" horiz-adv-x="1664" d="M1023 349l102 -204q-58 -179 -210 -290t-339 -111q-156 0 -288.5 77.5t-210 210t-77.5 288.5q0 181 104.5 330t274.5 211l17 -131q-122 -54 -195 -165.5t-73 -244.5q0 -185 131.5 -316.5t316.5 -131.5q126 0 232.5 65t165 175.5t49.5 236.5zM1571 249l58 -114l-256 -128 q-13 -7 -29 -7q-40 0 -57 35l-239 477h-472q-24 0 -42.5 16.5t-21.5 40.5l-96 779q-2 16 6 42q14 51 57 82.5t97 31.5q66 0 113 -47t47 -113q0 -69 -52 -117.5t-120 -41.5l37 -289h423v-128h-407l16 -128h455q40 0 57 -35l228 -455z" />
+<glyph unicode="&#xf194;" d="M1254 899q16 85 -21 132q-52 65 -187 45q-17 -3 -41 -12.5t-57.5 -30.5t-64.5 -48.5t-59.5 -70t-44.5 -91.5q80 7 113.5 -16t26.5 -99q-5 -52 -52 -143q-43 -78 -71 -99q-44 -32 -87 14q-23 24 -37.5 64.5t-19 73t-10 84t-8.5 71.5q-23 129 -34 164q-12 37 -35.5 69 t-50.5 40q-57 16 -127 -25q-54 -32 -136.5 -106t-122.5 -102v-7q16 -8 25.5 -26t21.5 -20q21 -3 54.5 8.5t58 10.5t41.5 -30q11 -18 18.5 -38.5t15 -48t12.5 -40.5q17 -46 53 -187q36 -146 57 -197q42 -99 103 -125q43 -12 85 -1.5t76 31.5q131 77 250 237 q104 139 172.5 292.5t82.5 226.5zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf195;" horiz-adv-x="1152" d="M1152 704q0 -191 -94.5 -353t-256.5 -256.5t-353 -94.5h-160q-14 0 -23 9t-9 23v611l-215 -66q-3 -1 -9 -1q-10 0 -19 6q-13 10 -13 26v128q0 23 23 31l233 71v93l-215 -66q-3 -1 -9 -1q-10 0 -19 6q-13 10 -13 26v128q0 23 23 31l233 71v250q0 14 9 23t23 9h160 q14 0 23 -9t9 -23v-181l375 116q15 5 28 -5t13 -26v-128q0 -23 -23 -31l-393 -121v-93l375 116q15 5 28 -5t13 -26v-128q0 -23 -23 -31l-393 -121v-487q188 13 318 151t130 328q0 14 9 23t23 9h160q14 0 23 -9t9 -23z" />
+<glyph unicode="&#xf196;" horiz-adv-x="1408" d="M1152 736v-64q0 -14 -9 -23t-23 -9h-352v-352q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v352h-352q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h352v352q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-352h352q14 0 23 -9t9 -23zM1280 288v832q0 66 -47 113t-113 47h-832 q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113zM1408 1120v-832q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5v832q0 119 84.5 203.5t203.5 84.5h832q119 0 203.5 -84.5t84.5 -203.5z" />
+<glyph unicode="&#xf197;" horiz-adv-x="1792" />
+<glyph unicode="&#xf198;" horiz-adv-x="1792" />
+<glyph unicode="&#xf199;" horiz-adv-x="1792" />
+<glyph unicode="&#xf19a;" horiz-adv-x="1792" />
+<glyph unicode="&#xf19b;" horiz-adv-x="1792" />
+<glyph unicode="&#xf19c;" horiz-adv-x="1792" />
+<glyph unicode="&#xf19d;" horiz-adv-x="1792" />
+<glyph unicode="&#xf19e;" horiz-adv-x="1792" />
+<glyph unicode="&#xf500;" horiz-adv-x="1792" />
+</font>
+</defs></svg> \ No newline at end of file
diff --git a/doc/website-v1/fonts/fontawesome-webfont.ttf b/doc/website-v1/fonts/fontawesome-webfont.ttf
new file mode 100755
index 0000000..e89738d
--- /dev/null
+++ b/doc/website-v1/fonts/fontawesome-webfont.ttf
Binary files differ
diff --git a/doc/website-v1/fonts/fontawesome-webfont.woff b/doc/website-v1/fonts/fontawesome-webfont.woff
new file mode 100755
index 0000000..8c1748a
--- /dev/null
+++ b/doc/website-v1/fonts/fontawesome-webfont.woff
Binary files differ
diff --git a/doc/website-v1/history-guide.adoc b/doc/website-v1/history-guide.adoc
new file mode 100644
index 0000000..a3dd9c6
--- /dev/null
+++ b/doc/website-v1/history-guide.adoc
@@ -0,0 +1,275 @@
+= Cluster history =
+:source-highlighter: pygments
+
+This guide should help administrators and consultants tackle
+issues in Pacemaker cluster installations. We concentrate on
+troubleshooting and analysis methods using crmsh history.
+
+Cluster leaves numerous traces behind, more than any other
+system. The logs and the rest are spread among all cluster nodes
+and multiple directories. The obvious difficulty is to show that
+information in a consolidated manner. This is where crmsh
+history helps.
+
+Hopefully, the guide will help you investigate your
+specific issue with more efficiency and less effort.
+
+== Sample cluster
+
+<<Listing 1>> shows a modestly complex sample cluster with which
+we can experiment and which we can break in some hopefully
+instructive ways.
+
+NOTE: We won't be going into how to set up nodes or configure the
+ cluster. For that, please refer to the
+ link:/start-guide[Getting Started] document.
+
+[source,crmsh]
+[caption="Listing 1: "]
+.Sample cluster configuration[[Listing 1]]
+-----------------
+include::include/history-guide/sample-cluster.conf.crm[]
+-----------------
+
+If you're new to clusters, that configuration may look
+overwhelming. The graphical presentation in <<Image 1>> of the
+essential elements and the relations between them is easier on
+the eye (and the mind).
+
+[caption="Image 1: "]
+.Sample cluster configuration as a graph[[Image 1]]
+image::/img/history-guide/sample-cluster.conf.png[link="/img/history-guide/sample-cluster.conf.png"]
+
+As homework, try to match the two cluster representations.
+
+== Quick (& dirty) start
+
+For the impatient, we give here a few examples of history use.
+
+Most of the time you will be dealing with various resource-induced
+(a.k.a. application-induced) phenomena. For instance, while
+preparing this document we noticed that a probe failed repeatedly
+on a node which wasn't even running the resource (<<Listing 2>>).
+
+[source,ansiclr]
+[caption="Listing 2: "]
+.crm status output[[Listing 2]]
+-----------------
+include::include/history-guide/status-probe-fail.typescript[]
+-----------------
+
+The history +resource+ command shows log messages relevant to the
+supplied resource (<<Listing 3>>).
+
+[source,ansiclr]
+[caption="Listing 3: "]
+.Logs on failed +nfs-server+ probe operation[[Listing 3]]
+-----------------
+include::include/history-guide/nfs-probe-err.typescript[]
+-----------------
+
+<1> NFS server error message.
+<2> Warning about a non-existing user id.
+
+NOTE: Messages logged by resource agents are always tagged with
+ 'type(ID)' (in <<Listing 3>>: +nfsserver(nfs-server)+).
+ +
+ Everything dumped to +stderr/stdout+ (in <<Listing 3>>:
+ +id: rpcuser: no such user+) is captured and subsequently
+ logged by +lrmd+. The +stdout+ output is at the 'info'
+ severity which is by default _not_ logged by pacemaker
+ since version 1.1.12.
+
+At the very top we see an error message reporting that the
+NFS server is running, but that some of its components,
+apparently unexpectedly, are not. However, we know that it cannot
+be running on the 'c' node as it is already running on the 'a' node.
+Not being able to figure out what is going on, we had to turn on
+tracing of the resource agent. <<Listing 4>> shows how to do
+that.
+
+[source,ansiclr]
+[caption="Listing 4: "]
+.Set `nfs-server` probe operation resource tracing[[Listing 4]]
+-----------------
+include::include/history-guide/resource-trace.typescript[]
+-----------------
+
+Trace of the +nfsserver+ RA revealed that the +nfs-server+ init
+script (used internally by the resource agent) _always_ exits
+with success for status. That was actually due to the recent port
+to systemd and an erroneous interpretation of `systemctl status`
+semantics: it always exits with success (due to some paradigm
+shift, we guess). FYI, `systemctl is-active` should be used
+instead, and it does report the service status as expected.
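+
+For illustration, `systemctl is-active` prints the state and sets
+its exit code accordingly (the unit name here is hypothetical):
+
+-----------------
+# systemctl is-active nfs-server.service
+inactive
+-----------------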
+
+As a bonus, a minor issue about a non-existing user id +rpcuser+
+is also revealed.
+
+NOTE: Messages in the crm history log output are colored
+ depending on the originating host.
+
+The rest of this document gives more details about crmsh history.
+If you're more of a just-try-it-out person, enter +crm history+
+and experiment. With +history+ commands you cannot really break
+anything (fingers crossed).
+
+== Introduction to crmsh `history`
+
+The history crmsh feature, as the name suggests, deals with the
+past. It was conceived as a facility to bring to the fore all the
+trails a pacemaker cluster leaves behind which are relevant to a
+particular resource, node, or event. It is used first and
+foremost as a troubleshooting tool, but it can also be helpful in
+studying pacemaker clusters.
+
+To begin, we run the `info` command which gives an overview, as
+shown in <<Listing 5>>.
+
+[source,ansiclr]
+[caption="Listing 5: "]
+.Basic history information[[Listing 5]]
+-----------------
+include::include/history-guide/info.typescript[]
+-----------------
+
+The `timeframe` command limits the observed period and helps
+focus on the events of interest. Here we wanted to look at a
+10-minute period. Two transitions were executed during this time.
+
+== Transitions
+
+Transitions are the basic units capturing cluster movements
+(resource operations and node events). A transition consists
+of a set of actions to be executed to reach a desired cluster
+status, as specified in the cluster configuration by the
+user.
+
+Every configuration or status change results in a transition.
+
+Every transition is also represented by a CIB, which is how
+cluster configuration and status are stored. Transitions are
+saved to files, the so-called PE (Policy Engine) inputs.
+
+In <<Listing 6>> we show how to display transitions.
+The listing is annotated to explain the output in more detail.
+
+
+[source,ansiclr]
+[caption="Listing 6: "]
+.Viewing transitions[[Listing 6]]
+-----------------
+include::include/history-guide/basic-transition.typescript[]
+-----------------
+
+<1> The transition command without arguments displays the latest
+transition.
+<2> A graph of the transition actions is rendered by `graphviz`. See
+<<Image 2>>.
+<3> Output of `crm_simulate` with irrelevant stuff edited out.
+`crm_simulate` was formerly known as `ptest`.
+<4> Transition summary followed by a selection of log messages.
+History weeds out messages which are of lesser importance. See
+<<Listing 8>> below if you want to see what history has been
+hiding from you here.
+
+Incidentally, if you wonder why all transitions in these examples
+are green: that is not because they were green in any sense of
+the color, but simply because green is the color of node 'c'. As
+chance would have it, 'c' was calling the shots at the time
+(being the Designated Coordinator, or DC). That is also why all
+`crmd` and `pengine` messages are coming from 'c'.
+
+NOTE: Transitions are the basis of pacemaker operation; make sure
+that you understand them.
+
+What you cannot see in the listing is the graph generated and
+shown in a separate window on your X11 display. <<Image 2>> may
+not be very involved, but we reckon it's as good a start as
+starts go.
+
+[caption="Image 2: "]
+.Graph for transition 1907[[Image 2]]
+image::/img/history-guide/smallapache-start.png[link="/img/history-guide/smallapache-start.png"]
+
+It may sometimes be useful to see what changed between two
+transitions. The history `diff` command is shown in action in
+<<Listing 7>>.
+
+[source,ansiclr]
+[caption="Listing 7: "]
+.Comparing transitions[[Listing 7]]
+-----------------
+include::include/history-guide/diff.typescript[]
+-----------------
+
+<1> Configuration diff between the last two transitions.
+Transitions may be referenced with indices starting at 0 and
+going backwards.
+<2> Status diff between the last two transitions.
+
+Whereas the configuration diff is (hopefully) obvious, the status
+diff needs some explanation: the status section of the PE inputs
+(transitions) always lags behind the configuration. This is
+because, at the time a transition is saved to a file, the actions
+of that transition are yet to be executed. So, the status section
+of transition _N_ corresponds to the configuration of transition
+_N-1_.
+
+<<Listing 8>>, promised earlier, shows the full, unfiltered log
+for this transition.
+
+[source,ansiclr]
+[caption="Listing 8: "]
+.Full transition log[[Listing 8]]
+-----------------
+include::include/history-guide/transition-log.typescript[]
+-----------------
+
+== Resource and node events
+
+Apart from transitions, events such as resource start or stop are
+what we usually want to examine. In our extremely exciting
+example of an apache resource restart, the history `resource`
+command picks the most interesting resource-related messages, as
+shown in <<Listing 9>>. Again, history shows only the most
+important log parts.
+
+NOTE: If you want to see more detail (which may not always be
+ recommendable), then use the history `detail` command to
+ increase the level of detail displayed.
+
+[source,ansiclr]
+[caption="Listing 9: "]
+.Resource related messages[[Listing 9]]
+-----------------
+include::include/history-guide/resource.typescript[]
+-----------------
+
+Node related events are node start and stop (cluster-wise), node
+membership changes, and stonith events (a.k.a. node fencing). The
+history `node` command is analogous to the `resource` command.
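+
+As an illustration, the typescript below (shipped with this
+guide) shows what `history node` reports after corosync was
+stopped on a node, leading to fencing:
+
+[source,ansiclr]
+-----------------
+include::include/history-guide/stonith-corosync-stopped.typescript[]
+-----------------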
+
+== Viewing logs
+
+The history `log` command, unsurprisingly, displays logs. The
+messages from the various nodes are interleaved and shown in
+different colors for the sake of easier viewing. Unlike other
+history commands, `log` shows all messages captured in the
+report. If you find some of them irrelevant, they can be filtered
+out: the `exclude` command takes extended regular expressions and
+is additive. We usually set the exclude expression to at least
+`ssh|systemd|kernel`. Use `exclude clear` to remove all
+expressions. And don't forget the `timeframe` command, which
+imposes a time window on the report.
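+
+For instance, a filtered log session might look like this (a
+minimal sketch; the expressions are the ones suggested above):
+
+-----------------
+crm(live)history# exclude ssh|systemd|kernel
+crm(live)history# log
+crm(live)history# exclude clear
+-----------------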
+
+== External reports, configurations, and graphs
+
+The information source history works with is an
+`hb_report`-generated report. Even when examining a live cluster,
+`hb_report` is run behind the scenes to collect the data before
+presenting it to the user. Well, at least to generate the first
+report: there is a special procedure for refreshing logs and
+collecting new PE inputs, which runs much faster than creating a
+report from scratch. However, juggling multiple sources,
+appending logs, and moving time windows may not always be
+foolproof, and if the source gets borked you can always ask for a
+brand new report with `refresh force`.
+
+Analyzing reports from an external source is no different from
+what we've seen so far. In fact, there's a `source` command which
+tells history where to look for data.
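+
+For instance, to point history at a report generated elsewhere
+(the file name here is hypothetical):
+
+-----------------
+crm(live)history# source /tmp/report.tar.bz2
+crm(live)history# info
+-----------------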
diff --git a/doc/website-v1/img/history-guide/sample-cluster.conf.png b/doc/website-v1/img/history-guide/sample-cluster.conf.png
new file mode 100644
index 0000000..0863923
--- /dev/null
+++ b/doc/website-v1/img/history-guide/sample-cluster.conf.png
Binary files differ
diff --git a/doc/website-v1/img/history-guide/smallapache-start.png b/doc/website-v1/img/history-guide/smallapache-start.png
new file mode 100644
index 0000000..47853c9
--- /dev/null
+++ b/doc/website-v1/img/history-guide/smallapache-start.png
Binary files differ
diff --git a/doc/website-v1/img/icons/README b/doc/website-v1/img/icons/README
new file mode 100644
index 0000000..f12b2a7
--- /dev/null
+++ b/doc/website-v1/img/icons/README
@@ -0,0 +1,5 @@
+Replaced the plain DocBook XSL admonition icons with Jimmac's DocBook
+icons (http://jimmac.musichall.cz/ikony.php3). I dropped transparency
+from the Jimmac icons to get round MS IE and FOP PNG incompatibilities.
+
+Stuart Rackham
diff --git a/doc/website-v1/img/icons/callouts/1.png b/doc/website-v1/img/icons/callouts/1.png
new file mode 100644
index 0000000..7d47343
--- /dev/null
+++ b/doc/website-v1/img/icons/callouts/1.png
Binary files differ
diff --git a/doc/website-v1/img/icons/callouts/10.png b/doc/website-v1/img/icons/callouts/10.png
new file mode 100644
index 0000000..997bbc8
--- /dev/null
+++ b/doc/website-v1/img/icons/callouts/10.png
Binary files differ
diff --git a/doc/website-v1/img/icons/callouts/11.png b/doc/website-v1/img/icons/callouts/11.png
new file mode 100644
index 0000000..ce47dac
--- /dev/null
+++ b/doc/website-v1/img/icons/callouts/11.png
Binary files differ
diff --git a/doc/website-v1/img/icons/callouts/12.png b/doc/website-v1/img/icons/callouts/12.png
new file mode 100644
index 0000000..31daf4e
--- /dev/null
+++ b/doc/website-v1/img/icons/callouts/12.png
Binary files differ
diff --git a/doc/website-v1/img/icons/callouts/13.png b/doc/website-v1/img/icons/callouts/13.png
new file mode 100644
index 0000000..14021a8
--- /dev/null
+++ b/doc/website-v1/img/icons/callouts/13.png
Binary files differ
diff --git a/doc/website-v1/img/icons/callouts/14.png b/doc/website-v1/img/icons/callouts/14.png
new file mode 100644
index 0000000..64014b7
--- /dev/null
+++ b/doc/website-v1/img/icons/callouts/14.png
Binary files differ
diff --git a/doc/website-v1/img/icons/callouts/15.png b/doc/website-v1/img/icons/callouts/15.png
new file mode 100644
index 0000000..0d65765
--- /dev/null
+++ b/doc/website-v1/img/icons/callouts/15.png
Binary files differ
diff --git a/doc/website-v1/img/icons/callouts/2.png b/doc/website-v1/img/icons/callouts/2.png
new file mode 100644
index 0000000..5d09341
--- /dev/null
+++ b/doc/website-v1/img/icons/callouts/2.png
Binary files differ
diff --git a/doc/website-v1/img/icons/callouts/3.png b/doc/website-v1/img/icons/callouts/3.png
new file mode 100644
index 0000000..ef7b700
--- /dev/null
+++ b/doc/website-v1/img/icons/callouts/3.png
Binary files differ
diff --git a/doc/website-v1/img/icons/callouts/4.png b/doc/website-v1/img/icons/callouts/4.png
new file mode 100644
index 0000000..adb8364
--- /dev/null
+++ b/doc/website-v1/img/icons/callouts/4.png
Binary files differ
diff --git a/doc/website-v1/img/icons/callouts/5.png b/doc/website-v1/img/icons/callouts/5.png
new file mode 100644
index 0000000..4d7eb46
--- /dev/null
+++ b/doc/website-v1/img/icons/callouts/5.png
Binary files differ
diff --git a/doc/website-v1/img/icons/callouts/6.png b/doc/website-v1/img/icons/callouts/6.png
new file mode 100644
index 0000000..0ba694a
--- /dev/null
+++ b/doc/website-v1/img/icons/callouts/6.png
Binary files differ
diff --git a/doc/website-v1/img/icons/callouts/7.png b/doc/website-v1/img/icons/callouts/7.png
new file mode 100644
index 0000000..472e96f
--- /dev/null
+++ b/doc/website-v1/img/icons/callouts/7.png
Binary files differ
diff --git a/doc/website-v1/img/icons/callouts/8.png b/doc/website-v1/img/icons/callouts/8.png
new file mode 100644
index 0000000..5e60973
--- /dev/null
+++ b/doc/website-v1/img/icons/callouts/8.png
Binary files differ
diff --git a/doc/website-v1/img/icons/callouts/9.png b/doc/website-v1/img/icons/callouts/9.png
new file mode 100644
index 0000000..a0676d2
--- /dev/null
+++ b/doc/website-v1/img/icons/callouts/9.png
Binary files differ
diff --git a/doc/website-v1/img/icons/caution.png b/doc/website-v1/img/icons/caution.png
new file mode 100644
index 0000000..9a8c515
--- /dev/null
+++ b/doc/website-v1/img/icons/caution.png
Binary files differ
diff --git a/doc/website-v1/img/icons/example.png b/doc/website-v1/img/icons/example.png
new file mode 100644
index 0000000..1199e86
--- /dev/null
+++ b/doc/website-v1/img/icons/example.png
Binary files differ
diff --git a/doc/website-v1/img/icons/home.png b/doc/website-v1/img/icons/home.png
new file mode 100644
index 0000000..37a5231
--- /dev/null
+++ b/doc/website-v1/img/icons/home.png
Binary files differ
diff --git a/doc/website-v1/img/icons/important.png b/doc/website-v1/img/icons/important.png
new file mode 100644
index 0000000..be685cc
--- /dev/null
+++ b/doc/website-v1/img/icons/important.png
Binary files differ
diff --git a/doc/website-v1/img/icons/next.png b/doc/website-v1/img/icons/next.png
new file mode 100644
index 0000000..64e126b
--- /dev/null
+++ b/doc/website-v1/img/icons/next.png
Binary files differ
diff --git a/doc/website-v1/img/icons/note.png b/doc/website-v1/img/icons/note.png
new file mode 100644
index 0000000..7c1f3e2
--- /dev/null
+++ b/doc/website-v1/img/icons/note.png
Binary files differ
diff --git a/doc/website-v1/img/icons/prev.png b/doc/website-v1/img/icons/prev.png
new file mode 100644
index 0000000..3e8f12f
--- /dev/null
+++ b/doc/website-v1/img/icons/prev.png
Binary files differ
diff --git a/doc/website-v1/img/icons/tip.png b/doc/website-v1/img/icons/tip.png
new file mode 100644
index 0000000..f087c73
--- /dev/null
+++ b/doc/website-v1/img/icons/tip.png
Binary files differ
diff --git a/doc/website-v1/img/icons/up.png b/doc/website-v1/img/icons/up.png
new file mode 100644
index 0000000..2db1ce6
--- /dev/null
+++ b/doc/website-v1/img/icons/up.png
Binary files differ
diff --git a/doc/website-v1/img/icons/warning.png b/doc/website-v1/img/icons/warning.png
new file mode 100644
index 0000000..d41edb9
--- /dev/null
+++ b/doc/website-v1/img/icons/warning.png
Binary files differ
diff --git a/doc/website-v1/img/laptop.png b/doc/website-v1/img/laptop.png
new file mode 100644
index 0000000..2f831ba
--- /dev/null
+++ b/doc/website-v1/img/laptop.png
Binary files differ
diff --git a/doc/website-v1/img/loader.gif b/doc/website-v1/img/loader.gif
new file mode 100644
index 0000000..b2cfedb
--- /dev/null
+++ b/doc/website-v1/img/loader.gif
Binary files differ
diff --git a/doc/website-v1/img/servers.gif b/doc/website-v1/img/servers.gif
new file mode 100644
index 0000000..20afdcb
--- /dev/null
+++ b/doc/website-v1/img/servers.gif
Binary files differ
diff --git a/doc/website-v1/include/history-guide/basic-transition.typescript b/doc/website-v1/include/history-guide/basic-transition.typescript
new file mode 100644
index 0000000..a5a0a31
--- /dev/null
+++ b/doc/website-v1/include/history-guide/basic-transition.typescript
@@ -0,0 +1,22 @@
+crm(live)history# transition <1>
+INFO: running ptest with /var/cache/crm/history/live/sle12-c/pengine/pe-input-1907.bz2
+INFO: starting dotty to show transition graph <2>
+Current cluster status: <3>
+Online: [ sle12-a sle12-c ]
+ s-libvirt (stonith:external/libvirt): Started sle12-c
+ ...
+ small-apache (ocf::heartbeat:apache): Stopped
+Transition Summary:
+ * Start small-apache (sle12-a)
+Executing cluster transition:
+ * Resource action: small-apache start on sle12-a
+Revised cluster status:
+Online: [ sle12-a sle12-c ]
+ s-libvirt (stonith:external/libvirt): Started sle12-c
+ ...
+ small-apache (ocf::heartbeat:apache): Started sle12-a
+
+Transition sle12-c:pe-input-1907 (20:30:14 - 20:30:15): <4>
+ total 1 actions: 1 Complete
+Apr 15 20:30:14 sle12-c crmd[1136]: notice: te_rsc_command: Initiating action 60: start small-apache_start_0 on sle12-a
+Apr 15 20:30:14 sle12-a apache(small-apache)[1586]: INFO: AH00558: httpd2: Could not reliably determine the server's fully qualified domain name, using 10.2.12.51. Set the 'ServerName' directive globally to suppress this message
diff --git a/doc/website-v1/include/history-guide/diff.typescript b/doc/website-v1/include/history-guide/diff.typescript
new file mode 100644
index 0000000..129febc
--- /dev/null
+++ b/doc/website-v1/include/history-guide/diff.typescript
@@ -0,0 +1,11 @@
+crm(live)history# diff -1 0 <1>
+--- -1
++++ 0
+@@ -11 +11 @@
+-primitive small-apache apache params configfile="/etc/apache2/small.conf" meta target-role=Stopped
++primitive small-apache apache params configfile="/etc/apache2/small.conf" meta target-role=Started
+crm(live)history# diff -1 0 status <2>
+--- -1
++++ 0
+@@ -15 +14,0 @@
+- small-apache (ocf::heartbeat:apache): Started sle12-a
diff --git a/doc/website-v1/include/history-guide/info.typescript b/doc/website-v1/include/history-guide/info.typescript
new file mode 100644
index 0000000..d7aae8d
--- /dev/null
+++ b/doc/website-v1/include/history-guide/info.typescript
@@ -0,0 +1,16 @@
+# crm history
+crm(live)history# timeframe "Apr 15 20:25" "Apr 15 20:35"
+crm(live)history# info
+Source: live
+Created on: Thu Apr 16 11:32:36 CEST 2015
+By: report -Z -Q -f Wed Apr 15 20:25:00 2015 -t 2015-04-15 20:35:00 /var/cache/crm/history/live
+Period: 2015-04-15 20:25:00 - 2015-04-15 20:35:00
+Nodes: sle12-a sle12-c
+Groups: nfs-srv nfs-disk
+Resources: s-libvirt p_drbd_nfs nfs-vg fs1 virtual-ip nfs-server websrv websrv-ip small-apache
+Transitions: 1906 1907
+crm(live)history# peinputs v
+Date Start End Filename Client User Origin
+==== ===== === ======== ====== ==== ======
+2015-04-15 20:29:59 20:30:01 pe-input-1906 no-client no-user no-origin
+2015-04-15 20:30:14 20:30:15 pe-input-1907 no-client no-user no-origin
diff --git a/doc/website-v1/include/history-guide/nfs-probe-err.typescript b/doc/website-v1/include/history-guide/nfs-probe-err.typescript
new file mode 100644
index 0000000..ca34ba5
--- /dev/null
+++ b/doc/website-v1/include/history-guide/nfs-probe-err.typescript
@@ -0,0 +1,20 @@
+# crm history resource nfs-server
+INFO: fetching new logs, please wait ...
+Dec 16 11:53:23 sle12-c nfsserver(nfs-server)[14911]: <1> ERROR: NFS server is up, but the locking daemons are down
+Dec 16 11:53:23 sle12-c crmd[2823]: notice: te_rsc_command: Initiating action 54: stop nfs-server_stop_0 on sle12-a
+Dec 16 11:53:23 sle12-c crmd[2823]: notice: te_rsc_command: Initiating action 3: stop nfs-server_stop_0 on sle12-c (local)
+Dec 16 11:53:23 sle12-c nfsserver(nfs-server)[14944]: INFO: Stopping NFS server ...
+Dec 16 11:53:23 sle12-c nfsserver(nfs-server)[14944]: INFO: Stopping sm-notify
+Dec 16 11:53:23 sle12-c nfsserver(nfs-server)[14944]: INFO: Stopping rpc.statd
+Dec 16 11:53:23 sle12-c nfsserver(nfs-server)[14944]: INFO: NFS server stopped
+Dec 16 11:53:23 sle12-c crmd[2823]: notice: te_rsc_command: Initiating action 55: start nfs-server_start_0 on sle12-a
+Dec 16 11:53:23 sle12-a nfsserver(nfs-server)[23255]: INFO: Stopping NFS server ...
+Dec 16 11:53:23 sle12-a nfsserver(nfs-server)[23255]: INFO: Stopping sm-notify
+Dec 16 11:53:23 sle12-a nfsserver(nfs-server)[23255]: INFO: Stopping rpc.statd
+Dec 16 11:53:23 sle12-a nfsserver(nfs-server)[23255]: INFO: NFS server stopped
+Dec 16 11:53:23 sle12-a nfsserver(nfs-server)[23320]: INFO: Starting NFS server ...
+Dec 16 11:53:23 sle12-a nfsserver(nfs-server)[23320]: INFO: Starting rpc.statd.
+Dec 16 11:53:24 sle12-a nfsserver(nfs-server)[23320]: INFO: executing sm-notify
+Dec 16 11:53:24 sle12-a nfsserver(nfs-server)[23320]: INFO: NFS server started
+Dec 16 11:53:24 sle12-a lrmd[6904]: <2> notice: operation_finished: nfs-server_start_0:23320:stderr [ id: rpcuser: no such user ]
+Dec 16 11:53:24 sle12-a lrmd[6904]: message repeated 3 times: [ notice: operation_finished: nfs-server_start_0:23320:stderr [ id: rpcuser: no such user ]]
diff --git a/doc/website-v1/include/history-guide/resource-trace.typescript b/doc/website-v1/include/history-guide/resource-trace.typescript
new file mode 100644
index 0000000..e66ff7c
--- /dev/null
+++ b/doc/website-v1/include/history-guide/resource-trace.typescript
@@ -0,0 +1,7 @@
+# crm resource trace nfs-server monitor 0
+INFO: Trace for nfs-server:monitor is written to /var/lib/heartbeat/trace_ra/
+INFO: Trace set, restart nfs-server to trace non-monitor operations
+# crm resource cleanup nfs-server
+Cleaning up nfs-server on sle12-a
+Cleaning up nfs-server on sle12-c
+Waiting for 2 replies from the CRMd.. OK
diff --git a/doc/website-v1/include/history-guide/resource.typescript b/doc/website-v1/include/history-guide/resource.typescript
new file mode 100644
index 0000000..90f0265
--- /dev/null
+++ b/doc/website-v1/include/history-guide/resource.typescript
@@ -0,0 +1,6 @@
+crm(live)history# resource small-apache
+Apr 15 20:29:59 sle12-c crmd[1136]: notice: te_rsc_command: Initiating action 60: stop small-apache_stop_0 on sle12-a
+Apr 15 20:29:59 sle12-a apache(small-apache)[1366]: INFO: Attempting graceful stop of apache PID 9155
+Apr 15 20:30:01 sle12-a apache(small-apache)[1366]: INFO: apache stopped.
+Apr 15 20:30:14 sle12-a apache(small-apache)[1586]: INFO: AH00558: httpd2: Could not reliably determine the server's fully qualified domain name, using 10.2.12.51. Set the 'ServerName' directive globally to suppress this message
+Apr 15 20:30:14 sle12-c crmd[1136]: notice: te_rsc_command: Initiating action 60: start small-apache_start_0 on sle12-a
diff --git a/doc/website-v1/include/history-guide/sample-cluster.conf.crm b/doc/website-v1/include/history-guide/sample-cluster.conf.crm
new file mode 100644
index 0000000..8b44663
--- /dev/null
+++ b/doc/website-v1/include/history-guide/sample-cluster.conf.crm
@@ -0,0 +1,54 @@
+node 167906357: sle12-c
+node 167906355: sle12-a
+primitive s-libvirt stonith:external/libvirt \
+ params hostlist="sle12-a sle12-c" hypervisor_uri="qemu+ssh://hex-10.suse.de/system?keyfile=/root/.ssh/xen" reset_method=reboot \
+ op monitor interval=5m timeout=60s
+primitive p_drbd_nfs ocf:linbit:drbd \
+ params drbd_resource=nfs \
+ op monitor interval=15 role=Master \
+ op monitor interval=30 role=Slave \
+ op start interval=0 timeout=300 \
+ op stop interval=0 timeout=120
+primitive nfs-vg LVM \
+ params volgrpname=nfs-vg
+primitive fs1 Filesystem \
+ params device="/dev/nfs-vg/fs1" directory="/srv/nfs" fstype=ext3 \
+ op monitor interval=30s
+primitive virtual-ip IPaddr2 \
+ params ip=10.2.12.100
+primitive nfs-server nfsserver \
+ params nfs_shared_infodir="/srv/nfs/state" nfs_ip=10.2.12.100 \
+ op monitor interval=30s
+primitive websrv apache \
+ params configfile="/etc/apache2/httpd.conf" \
+ op monitor interval=30
+primitive websrv-ip IPaddr2 \
+ params ip=10.2.12.101
+primitive small-apache apache \
+ params configfile="/etc/apache2/small.conf"
+group nfs-disk nfs-vg fs1
+group nfs-srv virtual-ip nfs-server
+ms ms_drbd_nfs p_drbd_nfs \
+ meta notify=true clone-max=2
+location nfs-pref virtual-ip 100: sle12-a
+location websrv-pref websrv 100: sle12-c
+colocation vg-with-drbd inf: nfs-vg ms_drbd_nfs:Master
+colocation c-nfs inf: nfs-srv nfs-disk
+colocation c-websrv inf: websrv websrv-ip
+colocation small-apache-with-virtual-ip inf: small-apache virtual-ip
+# need fs1 for the NFS server
+order o-nfs inf: nfs-disk nfs-srv
+# websrv serves requests at IP websrv-ip
+order o-websrv inf: websrv-ip websrv
+# small apache serves requests at IP virtual-ip
+order virtual-ip-before-small-apache inf: virtual-ip small-apache
+# drbd device is the nfs-vg PV
+order drbd-before-nfs-vg inf: ms_drbd_nfs:promote nfs-vg:start
+property cib-bootstrap-options: \
+ dc-version=1.1.12-ad083a8 \
+ cluster-infrastructure=corosync \
+ cluster-name=sle12-test3l-public \
+ no-quorum-policy=ignore \
+ last-lrm-refresh=1429192263
+op_defaults op-options: \
+ timeout=120s
diff --git a/doc/website-v1/include/history-guide/status-probe-fail.typescript b/doc/website-v1/include/history-guide/status-probe-fail.typescript
new file mode 100644
index 0000000..d1024e8
--- /dev/null
+++ b/doc/website-v1/include/history-guide/status-probe-fail.typescript
@@ -0,0 +1,15 @@
+# crm status
+Last updated: Tue Dec 16 11:57:04 2014
+Last change: Tue Dec 16 11:53:22 2014
+Stack: corosync
+Current DC: sle12-c (167906357) - partition with quorum
+Version: 1.1.12-ad083a8
+2 Nodes configured
+10 Resources configured
+Online: [ sle12-a sle12-c ]
+[...]
+ nfs-server (ocf::heartbeat:nfsserver): Started sle12-a
+[...]
+Failed actions:
+ nfs-server_monitor_0 on sle12-c 'unknown error' (1): call=298, status=complete,
+ last-rc-change='Tue Dec 16 11:53:23 2014', queued=0ms, exec=135ms
diff --git a/doc/website-v1/include/history-guide/stonith-corosync-stopped.typescript b/doc/website-v1/include/history-guide/stonith-corosync-stopped.typescript
new file mode 100644
index 0000000..1bca5ac
--- /dev/null
+++ b/doc/website-v1/include/history-guide/stonith-corosync-stopped.typescript
@@ -0,0 +1,8 @@
+# crm history node sle12-c
+INFO: fetching new logs, please wait ...
+Dec 19 14:36:18 sle12-c corosync[29551]: [MAIN ] Corosync Cluster Engine ('2.3.3'): started and ready to provide service.
+Dec 19 14:36:19 sle12-c corosync[29545]: Starting Corosync Cluster Engine (corosync): [ OK ]
+Dec 19 14:36:20 sle12-a pengine[6906]: warning: pe_fence_node: Node sle12-c will be fenced because our peer process is no longer available
+Dec 19 14:36:20 sle12-a pengine[6906]: warning: stage6: Scheduling Node sle12-c for STONITH
+Dec 19 14:36:20 sle12-a crmd[6907]: notice: te_fence_node: Executing reboot fencing operation (65) on sle12-c (timeout=60000)
+Dec 19 14:36:20 sle12-a crmd[6907]: notice: peer_update_callback: Node return implies stonith of sle12-c (action 65) completed
diff --git a/doc/website-v1/include/history-guide/transition-log.typescript b/doc/website-v1/include/history-guide/transition-log.typescript
new file mode 100644
index 0000000..eb689ec
--- /dev/null
+++ b/doc/website-v1/include/history-guide/transition-log.typescript
@@ -0,0 +1,13 @@
+crm(live)history# transition log
+INFO: retrieving information from cluster nodes, please wait ...
+Apr 15 20:30:14 sle12-c crmd[1136]: notice: do_state_transition: State transition S_IDLE -> S_POLICY_ENGINE [ input=I_PE_CALC cause=C_FSA_INTERNAL origin=abort_transition_graph ]
+Apr 15 20:30:14 sle12-c stonithd[1132]: notice: unpack_config: On loss of CCM Quorum: Ignore
+Apr 15 20:30:14 sle12-c pengine[1135]: notice: unpack_config: On loss of CCM Quorum: Ignore
+Apr 15 20:30:14 sle12-c pengine[1135]: notice: LogActions: Start small-apache#011(sle12-a)
+Apr 15 20:30:14 sle12-c crmd[1136]: notice: do_te_invoke: Processing graph 123 (ref=pe_calc-dc-1429122614-234) derived from /var/lib/pacemaker/pengine/pe-input-1907.bz2
+Apr 15 20:30:14 sle12-c crmd[1136]: notice: te_rsc_command: Initiating action 60: start small-apache_start_0 on sle12-a
+Apr 15 20:30:14 sle12-c pengine[1135]: notice: process_pe_message: Calculated Transition 123: /var/lib/pacemaker/pengine/pe-input-1907.bz2
+Apr 15 20:30:14 sle12-a stonithd[1160]: notice: unpack_config: On loss of CCM Quorum: Ignore
+Apr 15 20:30:14 sle12-a apache(small-apache)[1586]: INFO: AH00558: httpd2: Could not reliably determine the server's fully qualified domain name, using 10.2.12.51. Set the 'ServerName' directive globally to suppress this message
+Apr 15 20:30:14 sle12-a crmd[1164]: notice: process_lrm_event: Operation small-apache_start_0: ok (node=sle12-a, call=69, rc=0, cib-update=48, confirmed=true)
+Apr 15 20:30:15 sle12-c crmd[1136]: notice: run_graph: Transition 123 (Complete=1, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-1907.bz2): Complete
diff --git a/doc/website-v1/index.adoc b/doc/website-v1/index.adoc
new file mode 100644
index 0000000..1ebd635
--- /dev/null
+++ b/doc/website-v1/index.adoc
@@ -0,0 +1,25 @@
+The CRM Shell
+=============
+
+++++
+<div class="frontpage-image">
+<br><br>
+<img src="/img/laptop.png">
+<br><br><br>
+</div>
+++++
+
+*`crmsh` is a cluster management shell* for the Pacemaker High Availability stack.
+
+Configure, manage and troubleshoot clusters from the command line,
+with full tab completion and extensive help. `crmsh` also provides
+advanced features like low-level cluster configuration, cluster scripting,
+package management, and history exploration tools, giving you
+complete insight into the state of your cluster.
+
+* https://github.com/ClusterLabs/crmsh/[Source Code]
+* http://crmsh.github.io/man-4.3/[Reference Manual (v4.3.1)]
+* http://crmsh.github.io/man-3/[Reference Manual (v3.0.0)]
+* http://crmsh.github.io/man-2.0/[Reference Manual (v2.3.2)]
+* https://build.opensuse.org/package/show/network:ha-clustering:Stable/crmsh[Packages]
+* http://clusterlabs.org[Cluster Labs]
diff --git a/doc/website-v1/installation.adoc b/doc/website-v1/installation.adoc
new file mode 100644
index 0000000..5a027a0
--- /dev/null
+++ b/doc/website-v1/installation.adoc
@@ -0,0 +1,4 @@
+Installation
+============
+
+See link:/download[Download].
diff --git a/doc/website-v1/make-news.py b/doc/website-v1/make-news.py
new file mode 100644
index 0000000..f3c9073
--- /dev/null
+++ b/doc/website-v1/make-news.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python
+"""
+Output a combined news.adoc document
+Also write an Atom feed document
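+
+Usage: make-news.py <output.adoc> <news-entry.adoc>...
+   or: make-news.py gen/atom.xml <news-entry.adoc>...
+(if the first argument is gen/atom.xml, only the Atom feed is written;
+derived from the argv handling below)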
+"""
+
+import os
+import sys
+import hashlib
+import datetime
+import time
+
+OUTPUT_HEADER = """= News
+
+"""
+OUTPUT_FOOTER = """
+link:https://savannah.nongnu.org/news/?group_id=10890[Old News Archive]
+"""
+
+ATOM_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
+<feed xmlns="http://www.w3.org/2005/Atom">
+<title>crmsh</title>
+<subtitle>Cluster manager shell news</subtitle>
+<link href="http://crmsh.github.io/atom.xml" rel="self" />
+<link href="http://crmsh.github.io/" />
+<id>%(id)s</id>
+<updated>%(updated)s</updated>
+%(entries)s
+</feed>
+"""
+
+ATOM_NAME = "gen/atom.xml"
+
+root_id = "tag:crmsh.github.io,2014:/atom"
+
+def escape(s):
+ s = s.replace('&', '&amp;')
+ s = s.replace('<', '&lt;')
+ s = s.replace('>', '&gt;')
+ s = s.replace('"', "&quot;")
+ return s
+
+class Entry(object):
+ def __init__(self, fname):
+ self.filename = fname
+ self.name = os.path.splitext(os.path.basename(fname))[0]
+ with open(fname) as f:
+ self.title = f.readline().strip()
+ f.readline()
+ l = f.readline()
+ while l.startswith(':'):
+ k, v = l[1:].split(':', 1)
+ k = k.lower()
+ v = v.strip()
+ setattr(self, k, v)
+ l = f.readline()
+ self.content = l + f.read()
+ if not hasattr(self, 'author'):
+ raise ValueError("Missing author")
+ if not hasattr(self, 'email'):
+ raise ValueError("Missing email")
+ if not hasattr(self, 'date'):
+ raise ValueError("Missing date")
+
+ def atom_id(self):
+        # encode the file name: hashlib requires bytes under Python 3
+        return root_id + '::' + hashlib.sha1(self.filename.encode('utf-8')).hexdigest()
+
+ def atom_date(self):
+ return self.date.replace(' ', 'T') + ':00' + time.tzname[0]
+
+ def date_obj(self):
+ from dateutil import parser
+ return (parser.parse(self.date))
+
+ def atom_content(self):
+ return escape('<pre>\n' + self.content + '\n</pre>\n')
+
+ def atom(self):
+ data = {'title': self.title,
+ 'id': self.atom_id(),
+ 'updated': self.atom_date(),
+ 'name': self.name,
+ 'content': self.atom_content(),
+ 'author': self.author,
+ 'email': self.email}
+ return """<entry>
+<title>%(title)s</title>
+<id>%(id)s</id>
+<updated>%(updated)s</updated>
+<link>http://crmsh.github.io/news/%(name)s</link>
+<content type="html">
+%(content)s
+</content>
+<author>
+<name>%(author)s</name>
+<email>%(email)s</email>
+</author>
+</entry>
+""" % data
+
+
+def sort_entries(entries):
+ return list(reversed(sorted(entries, key=lambda e: e.date_obj())))
+
+
+def make_atom():
+ inputs = sort_entries([Entry(f) for f in sys.argv[2:]])
+ with open(ATOM_NAME, 'w') as output:
+ output.write(ATOM_TEMPLATE % {
+ 'id': root_id,
+ 'updated': inputs[0].atom_date(),
+ 'entries': '\n'.join(f.atom() for f in inputs)
+ })
+
+
+def main():
+    # entries come back from sort_entries() sorted newest first
+    inputs = sort_entries([Entry(f) for f in sys.argv[2:]])
+ with open(sys.argv[1], 'w') as output:
+ output.write(OUTPUT_HEADER)
+ e = inputs[0]
+ output.write("link:/news/%s[%s]\n\n" % (e.name, e.date))
+ output.write(":leveloffset: 1\n\n")
+ output.write("include::%s[]\n\n" % (e.filename))
+ output.write(":leveloffset: 0\n\n")
+
+ output.write("''''\n")
+ for e in inputs[1:]:
+ output.write("* link:/news/%s[%s %s]\n" % (e.name, e.date, e.title))
+ output.write(OUTPUT_FOOTER)
+
+if __name__ == "__main__":
+ if sys.argv[1] == ATOM_NAME:
+ make_atom()
+ else:
+ main()
diff --git a/doc/website-v1/man-1.2.adoc b/doc/website-v1/man-1.2.adoc
new file mode 100644
index 0000000..d945719
--- /dev/null
+++ b/doc/website-v1/man-1.2.adoc
@@ -0,0 +1,3437 @@
+:man source: crm
+:man version: 1.2.6
+:man manual: crmsh documentation
+
+crm(8)
+======
+
+NOTE: This is the documentation for stable release 1.2.6 of `crmsh`.
+
+
+NAME
+----
+crm - Pacemaker command line interface for configuration and management
+
+
+SYNOPSIS
+--------
+*crm* [-D output_type] [-f file] [-c cib] [-H hist_src] [-hFRDw] [--version] [args]
+
+
+[[topics_Description,Program description]]
+DESCRIPTION
+-----------
+Pacemaker configuration is stored in a CIB file (Cluster
+Information Base). The CIB is a set of instructions coded in XML.
+Editing the CIB is a challenge, not only due to its complexity
+and a wide variety of options, but also because XML is more
+computer than user friendly. The `crm` shell alleviates this
+issue significantly by introducing a small and simple
+configuration language. The CIB is translated into this language
+on the fly.
+
+`crm` is also a management tool. For management tasks it relies
+almost exclusively on other command line tools, such as
+`crm_resource(8)` or `crm_attribute(8)`. Use of these programs
+is, however, plagued by the notorious weakness common to all UNIX
+tools: a multitude of options, necessary for operation and yet
+very hard to remember. `crm` tries to present a consistent
+interface to the user and to hide the arcane details.
+
+It may be used either as an interactive shell or for single
+commands directly on the shell's command line. It is also
+possible to feed it a set of commands from standard input or a
+file, thus turning it into a scripting tool. Templates with
+ready-made configurations may help newbies learn about the
+cluster configuration or facilitate testing procedures.
+
+The `crm` shell is line oriented: every command must start and
+finish on the same line. It is possible to use a continuation
+character (`\`) to write one command in two or more lines. The
+continuation character is commonly used when displaying
+configurations.
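+
+For example, a single command spread over two lines (the resource
+shown is illustrative):
+...............
+    # crm configure primitive www-ip ocf:heartbeat:IPaddr \
+        params ip=192.168.1.101
+...............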
+
+OPTIONS
+-------
+*-f, --file*='FILE'::
+ Load commands from the given file. If the file is `-` then
+ use terminal `stdin`.
+
+*-c, --cib*='CIB'::
+ Start the session with the given shadow CIB file.
+ Equivalent to `cib use`.
+
+*-D, --display=*'OUTPUT_TYPE'::
+ Choose one of the output options: `plain`, `color`, or
+ `uppercase`. The default is `color` if the terminal emulation
+ supports colors. Otherwise, `plain` is used.
+
+*-F, --force*::
+    Make `crm` proceed with changes even though it would
+    normally ask the user to confirm some of them. Mostly useful
+    in scripts.
+
+*-w, --wait*::
+ Make `crm` wait for the cluster transition to finish (for the
+ changes to take effect) after each processed line.
+
+*-H, --history*='DIR|FILE'::
+ The `history` commands can examine either live cluster
+ (default) or a report generated by `hb_report`. Use this
+ option to specify a directory or file containing the report.
+
+*-h, --help*::
+ Print help page.
+
+*--version*::
+    Print crmsh version and build information (Mercurial
+    changeset hash).
+
+*-R, --regression-tests*::
+ Run in the regression test mode. Used mainly by the
+ regression testing suite.
+
+*-d, --debug*::
+ Print some debug information. Used by developers. [Not yet
+ refined enough to print useful information for other users.]
+
+[[topics_Introduction,Introduction to the user interface]]
+== Introduction to the user interface
+
+Arguably the most important aspect of `crm` is the user
+interface. We begin with an informal introduction so that the
+reader may get acquainted with it and get a general feeling of
+the tool. It is probably best just to give some examples:
+
+1. Command line (one-shot) use:
+
+ # crm resource stop www_app
+
+2. Interactive use:
+
+ # crm
+ crm(live)# resource
+ crm(live)resource# unmanage tetris_1
+ crm(live)resource# end
+ crm(live)# node standby node4
+
+3. Cluster configuration:
+
+ # crm configure<<EOF
+ #
+ # resources
+ #
+ primitive disk0 iscsi \
+ params portal=192.168.2.108:3260 target=iqn.2008-07.com.suse:disk0
+ primitive fs0 Filesystem \
+ params device=/dev/disk/by-label/disk0 directory=/disk0 fstype=ext3
+ primitive internal_ip IPaddr params ip=192.168.1.101
+ primitive apache apache \
+ params configfile=/disk0/etc/apache2/site0.conf
+ primitive apcfence stonith:apcsmart \
+ params ttydev=/dev/ttyS0 hostlist="node1 node2" \
+ op start timeout=60s
+ primitive pingd pingd \
+ params name=pingd dampen=5s multiplier=100 host_list="r1 r2"
+ #
+ # monitor apache and the UPS
+ #
+ monitor apache 60s:30s
+ monitor apcfence 120m:60s
+ #
+ # cluster layout
+ #
+ group internal_www \
+ disk0 fs0 internal_ip apache
+ clone fence apcfence \
+ meta globally-unique=false clone-max=2 clone-node-max=1
+ clone conn pingd \
+ meta globally-unique=false clone-max=2 clone-node-max=1
+ location node_pref internal_www \
+ rule 50: #uname eq node1 \
+ rule pingd: defined pingd
+ #
+ # cluster properties
+ #
+ property stonith-enabled=true
+ commit
+ EOF
+
+If you've ever done a CRM style configuration, you should be able
+to understand the above examples without much difficulty. The
+shell should provide a means to manage the cluster efficiently or
+put together a configuration in a concise manner.
+
+The `(live)` string in the prompt signifies that the current CIB
+in use is the cluster live configuration. It is also possible to
+work with the so-called shadow CIBs, i.e. configurations which
+are stored in files and aren't active, but may be applied at any
+time to the cluster.
+
+Since the CIB is hierarchical, so is the interface. There are
+several levels, and entering each of them enables the user to use
+a certain set of commands.
+
+[[topics_Shadows,Shadow CIB usage]]
+== Shadow CIB usage
+
+A shadow CIB is a normal cluster configuration stored in a file.
+Shadow CIBs may be manipulated in the same way as the _live_ CIB,
+but these changes have no effect on the cluster resources. The
+administrator may choose to apply any of them to the cluster,
+thus replacing the running configuration with the one in the
+shadow CIB. The `crm` prompt always contains the name of the
+configuration which is currently in use, or the string _live_ if
+we are using the current cluster configuration.
+
+At the configure level, no changes take place before the `commit`
+command. Sometimes, though, the administrator may start working
+with the running configuration but then change their mind and,
+instead of committing the changes to the cluster, save them to a
+shadow CIB. This short `configure` session excerpt shows how:
+...............
+ crm(live)configure# cib new test-2
+ INFO: test-2 shadow CIB created
+ crm(test-2)configure# commit
+...............
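+
+Should the changes later prove good, the shadow CIB may be
+applied to the cluster and the live CIB resumed; a minimal sketch
+using the `cib` commands described below:
+...............
+    crm(test-2)configure# cd ..
+    crm(test-2)# cib commit test-2
+    crm(test-2)# cib use
+    crm(live)#
+...............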
+
+[[topics_Templates,Configuration templates]]
+== Configuration templates
+
+Configuration templates are ready-made configurations created by
+cluster experts. They are designed so that users may generate
+valid cluster configurations with minimum effort. If you are new
+to Pacemaker, templates may be the best way to start.
+
+We will show here how to create a simple yet functional Apache
+configuration:
+...............
+ # crm configure
+ crm(live)configure# template
+ crm(live)configure template# list templates
+ apache filesystem virtual-ip
+ crm(live)configure template# new web <TAB><TAB>
+ apache filesystem virtual-ip
+ crm(live)configure template# new web apache
+ INFO: pulling in template apache
+ INFO: pulling in template virtual-ip
+ crm(live)configure template# list
+ web2-d web2 vip2 web3 vip web
+...............
+
+We enter the `template` level from `configure`. Use the `list`
+command to show templates available on the system. The `new`
+command creates a configuration from the `apache` template. You
+can use tab completion to pick templates. Note that the apache
+template depends on a virtual IP address which is automatically
+pulled along. The `list` command shows the just created `web`
+configuration, among other configurations (I hope that you,
+unlike me, will use more sensible and descriptive names).
+
+The `show` command, which displays the resulting configuration,
+may be used to get an idea about the minimum required changes
+which have to be done. All `ERROR` messages show the line numbers
+in which the respective parameters are to be defined:
+...............
+ crm(live)configure template# show
+ ERROR: 23: required parameter ip not set
+ ERROR: 61: required parameter id not set
+ ERROR: 65: required parameter configfile not set
+ crm(live)configure template# edit
+...............
+
+The `edit` command invokes the preferred text editor with the
+`web` configuration. At the top of the file, the user is advised
+how to make changes. A good template should require the user to
+specify only parameters. For example, the `web` configuration
+we created above has the following required and optional
+parameters (all parameter lines start with `%%`):
+...............
+ $ grep -n ^%% ~/.crmconf/web
+ 23:%% ip
+ 31:%% netmask
+ 35:%% lvs_support
+ 61:%% id
+ 65:%% configfile
+ 71:%% options
+ 76:%% envfiles
+...............
+
+These lines are the only ones that should be modified. Simply
+append the parameter value at the end of the line. For instance,
+after editing this template, the result could look like this (we
+used tabs instead of spaces to make the values stand out):
+...............
+ $ grep -n ^%% ~/.crmconf/web
+ 23:%% ip 192.168.1.101
+ 31:%% netmask
+ 35:%% lvs_support
+ 61:%% id websvc
+ 65:%% configfile /etc/apache2/httpd.conf
+ 71:%% options
+ 76:%% envfiles
+...............
+
+As you can see, the parameter line format is very simple:
+...............
+ %% <name> <value>
+...............
+
+After editing the file, use `show` again to display the
+configuration:
+...............
+ crm(live)configure template# show
+ primitive virtual-ip ocf:heartbeat:IPaddr \
+ params ip="192.168.1.101"
+ primitive apache ocf:heartbeat:apache \
+ params configfile="/etc/apache2/httpd.conf"
+ monitor apache 120s:60s
+ group websvc \
+ apache virtual-ip
+...............
+
+The target resource of the apache template is a group which we
+named `websvc` in this sample session.
+
+This configuration looks exactly like what you would type at the
+`configure` level. The point of templates is to save you some
+typing. It is important, however, to understand the configuration
+produced.
+
+Finally, the configuration may be applied to the current
+crm configuration (note how the configuration changed slightly,
+though it is still equivalent, after being digested at the
+`configure` level):
+...............
+ crm(live)configure template# apply
+ crm(live)configure template# cd ..
+ crm(live)configure# show
+ node xen-b
+ node xen-c
+ primitive apache ocf:heartbeat:apache \
+ params configfile="/etc/apache2/httpd.conf" \
+ op monitor interval="120s" timeout="60s"
+ primitive virtual-ip ocf:heartbeat:IPaddr \
+ params ip="192.168.1.101"
+ group websvc apache virtual-ip
+...............
+
+Note that this still does not commit the configuration to the CIB
+which is used in the shell, either the running one (`live`) or
+some shadow CIB. For that you still need to execute the `commit`
+command.
+
+To complete our example, we should also define the preferred node
+to run the service:
+...............
+ crm(live)configure# location websvc-pref websvc 100: xen-b
+...............
+
+If you are not happy with some resource names which are provided
+by default, you can rename them now:
+...............
+ crm(live)configure# rename virtual-ip intranet-ip
+ crm(live)configure# show
+ node xen-b
+ node xen-c
+ primitive apache ocf:heartbeat:apache \
+ params configfile="/etc/apache2/httpd.conf" \
+ op monitor interval="120s" timeout="60s"
+ primitive intranet-ip ocf:heartbeat:IPaddr \
+ params ip="192.168.1.101"
+ group websvc apache intranet-ip
+ location websvc-pref websvc 100: xen-b
+...............
+
+To summarize, working with templates typically consists of the
+following steps:
+
+- `new`: create a new configuration from templates
+- `edit`: define parameters, at least the required ones
+- `show`: see if the configuration is valid
+- `apply`: apply the configuration to the `configure` level
+
+[[topics_Testing,Resource testing]]
+== Resource testing
+
+The amount of detail in a cluster makes all configurations prone
+to errors. By far the largest number of issues in a cluster is
+due to bad resource configuration. The shell can help quickly
+diagnose such problems, and considerably reduce your keyboard
+wear.
+
+Let's say that we entered the following configuration:
+...............
+ node xen-b
+ node xen-c
+ node xen-d
+ primitive fencer stonith:external/libvirt \
+ params hypervisor_uri="qemu+tcp://10.2.13.1/system" \
+ hostlist="xen-b xen-c xen-d" \
+ op monitor interval="2h"
+ primitive svc ocf:heartbeat:Xinetd \
+ params service="systat" \
+ op monitor interval="30s"
+ primitive intranet-ip ocf:heartbeat:IPaddr2 \
+ params ip="10.2.13.100" \
+ op monitor interval="30s"
+ primitive apache ocf:heartbeat:apache \
+ params configfile="/etc/apache2/httpd.conf" \
+ op monitor interval="120s" timeout="60s"
+ group websvc apache intranet-ip
+ location websvc-pref websvc 100: xen-b
+...............
+
+Before typing `commit` to submit the configuration to the CIB, we
+can make sure that all resources are usable on all nodes:
+...............
+ crm(live)configure# rsctest websvc svc fencer
+...............
+
+It is important that the resources being tested are not running
+on any node; otherwise, the `rsctest` command will refuse to do
+anything. Of course, if the current configuration resides in a
+shadow CIB, then a `commit` is irrelevant. The point is simply
+that the resources must not be running on any node.
+
+.Note on stopping all resources
+****************************
+Alternatively to not committing a configuration, it is also
+possible to tell Pacemaker not to start any resources:
+
+...............
+ crm(live)configure# property stop-all-resources="yes"
+...............
+Almost none---resources of class stonith are still started, but
+the shell is not as strict when it comes to stonith resources.
+****************************
+
+The order of resources is significant insofar as a resource
+depends on all resources to its left. In most configurations,
+it's probably practical to test resources in several runs, based
+on their dependencies.
+
+Apart from groups, `crm` does not interpret constraints and
+therefore knows nothing about resource dependencies. It also
+doesn't know if a resource can run on a node at all in case of an
+asymmetric cluster. It is up to the user to specify a list of
+eligible nodes if a resource is not meant to run on every node.
+
+[[topics_Completion,Tab completion]]
+== Tab completion
+
+`crm` makes extensive use of tab completion. The completion is
+both static (i.e. for `crm` commands) and dynamic. The latter
+takes into account the current status of the cluster and
+information from the installed resource agents. Sometimes,
+completion may also be used to get short help on resource
+parameters. Here are a few examples:
+ crm(live)# resource
+ crm(live)resource# <TAB><TAB>
+ bye failcount move restart unmigrate
+ cd help param show unmove
+ cleanup list promote start up
+ demote manage quit status utilization
+ end meta refresh stop
+ exit migrate reprobe unmanage
+ crm(live)resource# end
+ crm(live)# configure
+ crm(live)configure# primitive fence-1 <TAB><TAB>
+ heartbeat: lsb: ocf: stonith:
+ crm(live)configure# primitive fence-1 stonith:<TAB><TAB>
+ apcmaster external/ippower9258 fence_legacy
+ apcmastersnmp external/kdumpcheck ibmhmc
+ apcsmart external/libvirt ipmilan
+ baytech external/nut meatware
+ bladehpi external/rackpdu null
+ cyclades external/riloe nw_rpc100s
+ drac3 external/sbd rcd_serial
+ external/drac5 external/ssh rps10
+ external/dracmc-telnet external/ssh-bad ssh
+ external/hmchttp external/ssh-slow suicide
+ external/ibmrsa external/vmware wti_mpc
+ external/ibmrsa-telnet external/xen0 wti_nps
+ external/ipmi external/xen0-ha
+ crm(live)configure# primitive fence-1 stonith:ipmilan params <TAB><TAB>
+ auth= hostname= ipaddr= login= password= port= priv=
+ crm(live)configure# primitive fence-1 stonith:ipmilan params auth=<TAB><TAB>
+ auth* (string)
+ The authorization type of the IPMI session ("none", "straight", "md2", or "md5")
+ crm(live)configure# primitive fence-1 stonith:ipmilan params auth=
+...............
+
+[[topics_Checks,Configuration semantic checks]]
+== Configuration semantic checks
+
+Resource definitions may be checked against the meta-data
+provided with the resource agents. The following checks are
+currently carried out:
+
+- whether all required parameters are set
+- whether all set parameters actually exist
+- whether timeout values for operations are sufficient
+
+The parameter checks are obvious and need no further explanation.
+Failures in these checks are treated as configuration errors.
+
+The timeouts for operations should be at least as long as those
+recommended in the meta-data. Too short timeout values are a
+common mistake in cluster configurations and, even worse, they
+often slip through if cluster testing was not thorough. Though
+operation timeout issues are treated as warnings, make sure that
+the timeouts are usable in your environment. Note also that the
+values given are just an _advisory minimum_---your resources may
+require longer timeouts.
+
+The user may tune the frequency of checks and the treatment of
+errors with the <<cmdhelp_options_check-frequency,`check-frequency`>>
+and <<cmdhelp_options_check-mode,`check-mode`>> preferences.
+
+Note that if `check-frequency` is set to `always` and
+`check-mode` to `strict`, errors are not tolerated and such a
+configuration cannot be saved.
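+
+For example, these settings could be applied at the `options`
+level like this (a minimal sketch):
+...............
+    crm(live)# options
+    crm(live)options# check-frequency always
+    crm(live)options# check-mode strict
+...............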
+
+[[topics_Security,Access Control Lists (ACL)]]
+== Access Control Lists (ACL)
+
+By default, the users from the `haclient` group have full access
+to the cluster (or, more precisely, to the CIB). Access control
+lists allow for finer access control to the cluster.
+
+Access control lists consist of an ordered set of access rules.
+Each rule allows read or write access or denies access
+completely. Rules are typically combined to produce a specific
+role. Then, users may be assigned a role.
+
+For instance, this is a role which defines a set of rules
+allowing management of a single resource:
+
+...............
+ role bigdb_admin \
+ write meta:bigdb:target-role \
+ write meta:bigdb:is-managed \
+ write location:bigdb \
+ read ref:bigdb
+...............
+
+The first two rules allow modifying the `target-role` and
+`is-managed` meta attributes which effectively enables users in
+this role to stop/start and manage/unmanage the resource. The
+constraints write access rule allows moving the resource around.
+Finally, the user is granted read access to the resource
+definition.
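+
+A user may then be assigned this role with the configure `user`
+command (the user name here is hypothetical):
+
+...............
+    user alice role:bigdb_admin
+...............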
+
+For proper operation of all Pacemaker programs, it is advisable
+to add the following role to all users:
+
+...............
+ role read_all \
+ read cib
+...............
+
+For finer grained read access try with the rules listed in the
+following role:
+
+...............
+ role basic_read \
+ read node attribute:uname \
+ read node attribute:type \
+ read property \
+ read status
+...............
+
+It is, however, possible that some Pacemaker programs (e.g.
+`ptest`) may not function correctly if the whole CIB is not
+readable.
+
+Some of the ACL rules in the examples above are expanded by the
+shell to XPath specifications. For instance,
+`meta:bigdb:target-role` is a shortcut for
+`//primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']`.
+You can see the expansion by showing XML:
+
+...............
+    crm(live)configure# show xml bigdb_admin
+ ...
+ <acls>
+ <acl_role id="bigdb_admin">
+ <write id="bigdb_admin-write"
+ xpath="//primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']"/>
+...............
+
+Many different XPath expressions can have equal meaning. For
+instance, the following two are equivalent, but only the first
+one is going to be recognized as a shortcut:
+
+...............
+ //primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']
+ //resources/primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']
+...............
+
+XPath is a powerful language, but you should try to keep your ACL
+XPath expressions simple and use the builtin shortcuts whenever
+possible.
+
+[[topics_Reference,Command reference]]
+== Command reference
+
+We define a small and simple language. Most commands consist of
+just a list of simple tokens. The only complex constructs are
+found at the `configure` level.
+
+The syntax is described in a somewhat informal manner: `<>`
+denotes a string, `[]` means that the construct is optional, the
+ellipsis (`...`) signifies that the previous construct may be
+repeated, `|` means pick one of many, and the rest are literals
+(strings, `:`, `=`).
+
+=== `status`
+
+Show cluster status. The status is displayed by `crm_mon`. Supply
+additional arguments for more information or different format.
+See `crm_mon(8)` for more details.
+
+Usage:
+...............
+ status [<option> ...]
+
+ option :: bynode | inactive | ops | timing | failcounts
+...............
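+Example (show resources grouped by node, including inactive ones):
+...............
+        status bynode inactive
+...............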
+
+[[cmdhelp_cib,CIB shadow management]]
+=== `cib` (shadow CIBs)
+
+This level is for management of shadow CIBs. It is available both
+at the top level and the `configure` level.
+
+All the commands are implemented using `cib_shadow(8)` and the
+`CIB_shadow` environment variable. The user prompt always
+includes the name of the currently active shadow or the live CIB.
+
+[[cmdhelp_cib_new,create a new shadow CIB]]
+==== `new`
+
+Create a new shadow CIB. The live cluster configuration and
+status are copied to the shadow CIB. Specify `withstatus` if you
+want to edit the status section of the shadow CIB (see the
+<<cmdhelp_cibstatus,cibstatus section>>). Add `force` to force overwriting the
+existing shadow CIB.
+
+To start with an empty configuration that is not copied from the live
+CIB, specify the `empty` keyword. (This also allows a shadow CIB to be
+created in case no cluster is running.)
+
+Usage:
+...............
+ new <cib> [withstatus] [force] [empty]
+...............
+
+[[cmdhelp_cib_delete,delete a shadow CIB]]
+==== `delete`
+
+Delete an existing shadow CIB.
+
+Usage:
+...............
+ delete <cib>
+...............
+
+[[cmdhelp_cib_reset,copy live cib to a shadow CIB]]
+==== `reset`
+
+Copy the current cluster configuration into the shadow CIB.
+
+Usage:
+...............
+ reset <cib>
+...............
+
+[[cmdhelp_cib_commit,copy a shadow CIB to the cluster]]
+==== `commit`
+
+Apply a shadow CIB to the cluster.
+
+Usage:
+...............
+ commit <cib>
+...............
+
+[[cmdhelp_cib_use,change working CIB]]
+==== `use`
+
+Choose a CIB source. If you want to edit the status from the
+shadow CIB, specify `withstatus` (see <<cmdhelp_cibstatus,`cibstatus`>>).
+Leave out the CIB name to switch to the running CIB.
+
+Usage:
+...............
+ use [<cib>] [withstatus]
+...............
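+
+For example, assuming a shadow CIB named `test-2` exists, switch
+to it and then back to the running CIB:
+...............
+ use test-2
+ use
+...............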
+
+[[cmdhelp_cib_diff,diff between the shadow CIB and the live CIB]]
+==== `diff`
+
+Print differences between the current cluster configuration and
+the active shadow CIB.
+
+Usage:
+...............
+ diff
+...............
+
+[[cmdhelp_cib_list,list all shadow CIBs]]
+==== `list`
+
+List existing shadow CIBs.
+
+Usage:
+...............
+ list
+...............
+
+[[cmdhelp_cib_import,import a CIB or PE input file to a shadow]]
+==== `import`
+
+At times it may be useful to create a shadow file from the
+existing CIB. The CIB may be specified as a file or as a PE input
+file number. The shell will look up files in the local directory
+first and then in the PE directory (typically `/var/lib/pengine`).
+Once the CIB file is found, it is copied to a shadow and this
+shadow is immediately available for use at both `configure` and
+`cibstatus` levels.
+
+If the shadow name is omitted then the target shadow is named
+after the input CIB file.
+
+Note that there is often more than one PE input file, so you may
+need to specify the full name.
+
+Usage:
+...............
+ import {<file>|<number>} [<shadow>]
+...............
+Examples:
+...............
+ import pe-warn-2222
+ import 2289 issue2
+...............
+
+[[cmdhelp_cib_cibstatus,CIB status management and editing]]
+==== `cibstatus`
+
+Enter the level for editing and managing the CIB status section.
+See the <<cmdhelp_cibstatus,CIB status management section>>.
+
+[[cmdhelp_ra,Resource Agents (RA) lists and documentation]]
+=== `ra`
+
+This level contains commands which show various information about
+the installed resource agents. It is available both at the top
+level and at the `configure` level.
+
+[[cmdhelp_ra_classes,list classes and providers]]
+==== `classes`
+
+Print all resource agents' classes and, where appropriate, a list
+of available providers.
+
+Usage:
+...............
+ classes
+...............
+
+[[cmdhelp_ra_list,list RA for a class (and provider)]]
+==== `list`
+
+List available resource agents for the given class. If the class
+is `ocf`, supply a provider to get agents which are available
+only from that provider.
+
+Usage:
+...............
+ list <class> [<provider>]
+...............
+Example:
+...............
+ list ocf pacemaker
+...............
+
+[[cmdhelp_ra_meta,show meta data for a RA]]
+==== `meta` (`info`)
+
+Show the meta-data of a resource agent type. This is where users
+can find information on how to use a resource agent. It is also
+possible to get information from some programs: `pengine`,
+`crmd`, `cib`, and `stonithd`. Just specify the program name
+instead of an RA.
+
+Usage:
+...............
+ info [<class>:[<provider>:]]<type>
+ info <type> <class> [<provider>] (obsolete)
+...............
+Example:
+...............
+ info apache
+ info ocf:pacemaker:Dummy
+ info stonith:ipmilan
+ info pengine
+...............
+
+[[cmdhelp_ra_providers,show providers for a RA and a class]]
+==== `providers`
+
+List providers for a resource agent type. The class parameter
+defaults to `ocf`.
+
+Usage:
+...............
+ providers <type> [<class>]
+...............
+Example:
+...............
+ providers apache
+...............
+
+[[cmdhelp_resource,Resource management]]
+=== `resource`
+
+At this level resources may be managed.
+
+All (or almost all) commands are implemented with the CRM tools
+such as `crm_resource(8)`.
+
+[[cmdhelp_resource_status,show status of resources]]
+==== `status` (`show`, `list`)
+
+Print resource status. If the resource parameter is left out,
+the status of all resources is printed.
+
+Usage:
+...............
+ status [<rsc>]
+...............
+
+[[cmdhelp_resource_start,start a resource]]
+==== `start`
+
+Start a resource by setting the `target-role` attribute. If there
+are multiple meta attribute sets, the attribute is set in all of
+them. If the resource is a clone, all `target-role` attributes
+are removed from the child resources.
+
+For details on group management see <<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+ start <rsc>
+...............
+
+[[cmdhelp_resource_stop,stop a resource]]
+==== `stop`
+
+Stop a resource using the `target-role` attribute. If there
+are multiple meta attribute sets, the attribute is set in all of
+them. If the resource is a clone, all `target-role` attributes
+are removed from the child resources.
+
+For details on group management see <<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+ stop <rsc>
+...............
+
+[[cmdhelp_resource_restart,restart a resource]]
+==== `restart`
+
+Restart a resource. This is essentially a shortcut for a resource
+stop followed by a start. The shell first waits for the stop to
+finish, that is for all resources to really stop, and only then
+orders the start action. Because this command entails a whole set
+of operations, informational messages are printed to let the user
+see some progress.
+
+For details on group management see <<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+ restart <rsc>
+...............
+Example:
+...............
+ # crm resource restart g_webserver
+ INFO: ordering g_webserver to stop
+ waiting for stop to finish .... done
+ INFO: ordering g_webserver to start
+ #
+...............
+
+[[cmdhelp_resource_promote,promote a master-slave resource]]
+==== `promote`
+
+Promote a master-slave resource using the `target-role`
+attribute.
+
+Usage:
+...............
+ promote <rsc>
+...............
+
+[[cmdhelp_resource_demote,demote a master-slave resource]]
+==== `demote`
+
+Demote a master-slave resource using the `target-role`
+attribute.
+
+Usage:
+...............
+ demote <rsc>
+...............
+
+[[cmdhelp_resource_manage,put a resource into managed mode]]
+==== `manage`
+
+Manage a resource using the `is-managed` attribute. If there
+are multiple meta attribute sets, the attribute is set in all of
+them. If the resource is a clone, all `is-managed` attributes are
+removed from the child resources.
+
+For details on group management see <<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+ manage <rsc>
+...............
+
+[[cmdhelp_resource_unmanage,put a resource into unmanaged mode]]
+==== `unmanage`
+
+Unmanage a resource using the `is-managed` attribute. If there
+are multiple meta attribute sets, the attribute is set in all of
+them. If the resource is a clone, all `is-managed` attributes are
+removed from the child resources.
+
+For details on group management see <<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+ unmanage <rsc>
+...............
+
+[[cmdhelp_resource_migrate,migrate a resource to another node]]
+==== `migrate` (`move`)
+
+Migrate a resource to a different node. If the node is left out, the
+resource is migrated by creating a constraint which prevents it from
+running on the current node. Additionally, you may specify a
+lifetime for the constraint---once it expires, the location
+constraint will no longer be active.
+
+Usage:
+...............
+ migrate <rsc> [<node>] [<lifetime>] [force]
+...............
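+
+For example (the resource and node names are illustrative, and
+the lifetime is assumed to be an ISO8601 duration, here one day):
+...............
+ migrate bigdb node3
+ migrate bigdb node3 P1D
+...............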
+
+[[cmdhelp_resource_unmigrate,unmigrate a resource]]
+==== `unmigrate` (`unmove`)
+
+Remove the constraint generated by the previous migrate command.
+
+Usage:
+...............
+ unmigrate <rsc>
+...............
+
+[[cmdhelp_resource_param,manage a parameter of a resource]]
+==== `param`
+
+Show/edit/delete a parameter of a resource.
+
+Usage:
+...............
+ param <rsc> set <param> <value>
+ param <rsc> delete <param>
+ param <rsc> show <param>
+...............
+Example:
+...............
+ param ip_0 show ip
+...............
+
+[[cmdhelp_resource_secret,manage sensitive parameters]]
+==== `secret`
+
+Sensitive parameters can be kept in local files rather than in
+the CIB in order to prevent accidental data exposure. Use the
+`secret` command to manage such parameters. `stash` moves the
+value from the CIB to a local file and `unstash` moves it back to
+the CIB. The `set` subcommand sets the parameter to the provided
+value. `delete` removes the parameter completely. `show` displays
+the value of the parameter from the local file. Use `check` to
+verify whether the local file content is valid.
+
+Usage:
+...............
+ secret <rsc> set <param> <value>
+ secret <rsc> stash <param>
+ secret <rsc> unstash <param>
+ secret <rsc> delete <param>
+ secret <rsc> show <param>
+ secret <rsc> check <param>
+...............
+Example:
+...............
+ secret fence_1 show password
+ secret fence_1 stash password
+ secret fence_1 set password secret_value
+...............
+
+[[cmdhelp_resource_meta,manage a meta attribute]]
+==== `meta`
+
+Show/edit/delete a meta attribute of a resource. Note that some
+meta attributes of a resource may also be managed with other
+commands such as `resource stop`.
+
+Usage:
+...............
+ meta <rsc> set <attr> <value>
+ meta <rsc> delete <attr>
+ meta <rsc> show <attr>
+...............
+Example:
+...............
+ meta ip_0 set target-role stopped
+...............
+
+[[cmdhelp_resource_utilization,manage a utilization attribute]]
+==== `utilization`
+
+Show/edit/delete a utilization attribute of a resource. These
+attributes describe hardware requirements. By setting the
+`placement-strategy` cluster property appropriately, it is
+possible then to distribute resources based on resource
+requirements and node size. See also <<cmdhelp_node_utilization,node utilization attributes>>.
+
+Usage:
+...............
+ utilization <rsc> set <attr> <value>
+ utilization <rsc> delete <attr>
+ utilization <rsc> show <attr>
+...............
+Example:
+...............
+ utilization xen1 set memory 4096
+...............
+
+[[cmdhelp_resource_failcount,manage failcounts]]
+==== `failcount`
+
+Show/edit/delete the failcount of a resource.
+
+Usage:
+...............
+ failcount <rsc> set <node> <value>
+ failcount <rsc> delete <node>
+ failcount <rsc> show <node>
+...............
+Example:
+...............
+ failcount fs_0 delete node2
+...............
+
+[[cmdhelp_resource_cleanup,cleanup resource status]]
+==== `cleanup`
+
+Clean up resource status. This is typically done after the
+resource has temporarily failed. If a node is omitted, the status
+is cleaned up on all nodes. If there are many nodes, the command
+may take a while.
+
+Usage:
+...............
+ cleanup <rsc> [<node>]
+...............
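+
+For instance, to clean up the status of `fs_0` on a single node
+only (names as in the `failcount` example above):
+...............
+ cleanup fs_0 node2
+...............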
+
+[[cmdhelp_resource_refresh,refresh CIB from the LRM status]]
+==== `refresh`
+
+Refresh CIB from the LRM status.
+
+Usage:
+...............
+ refresh [<node>]
+...............
+
+[[cmdhelp_resource_reprobe,probe for resources not started by the CRM]]
+==== `reprobe`
+
+Probe for resources not started by the CRM.
+
+Usage:
+...............
+ reprobe [<node>]
+...............
+
+[[cmdhelp_resource_trace,start RA tracing]]
+==== `trace`
+
+Start tracing the RA for the given operation. The trace files are
+stored in `$HA_VARLIB/trace_ra`. Note that if the operation to be
+traced is `monitor`, the number of trace files can grow very
+quickly.
+
+Usage:
+...............
+ trace <rsc> <op> [<interval>]
+...............
+Example:
+...............
+ trace fs start
+...............
+
+[[cmdhelp_resource_untrace,stop RA tracing]]
+==== `untrace`
+
+Stop tracing the RA for the given operation.
+
+Usage:
+...............
+ untrace <rsc> <op> [<interval>]
+...............
+Example:
+...............
+ untrace fs start
+...............
+
+[[cmdhelp_node,Nodes management]]
+=== `node`
+
+Node management and status commands.
+
+[[cmdhelp_node_status,show nodes' status as XML]]
+==== `status`
+
+Show nodes' status as XML. If the node parameter is omitted then
+all nodes are shown.
+
+Usage:
+...............
+ status [<node>]
+...............
+
+[[cmdhelp_node_show,show node]]
+==== `show`
+
+Show a node definition. If the node parameter is omitted then all
+nodes are shown.
+
+Usage:
+...............
+ show [<node>]
+...............
+
+[[cmdhelp_node_standby,put node into standby]]
+==== `standby`
+
+Set a node to standby status. The node parameter defaults to the
+node where the command is run. Additionally, you may specify a
+lifetime for the standby---if set to `reboot`, the node will be
+back online once it reboots. `forever` will keep the node in
+standby after reboot.
+
+Usage:
+...............
+ standby [<node>] [<lifetime>]
+
+ lifetime :: reboot | forever
+...............
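+
+For example, to put a hypothetical node into standby only until
+its next reboot:
+...............
+ standby node1 reboot
+...............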
+
+[[cmdhelp_node_online,set node online]]
+==== `online`
+
+Set a node to online status. The node parameter
+defaults to the node where the command is run.
+
+Usage:
+...............
+ online [<node>]
+...............
+
+[[cmdhelp_node_maintenance,put node into maintenance mode]]
+==== `maintenance`
+
+Set the node status to maintenance. This is equivalent to the
+cluster-wide `maintenance-mode` property but puts just one node
+into maintenance mode. The node parameter defaults to the
+node where the command is run.
+
+Usage:
+...............
+ maintenance [<node>]
+...............
+
+[[cmdhelp_node_ready,put node into ready mode]]
+==== `ready`
+
+Set the node's maintenance status to `off`. The node should now
+again be fully operational and capable of running resource
+operations.
+
+Usage:
+...............
+ ready [<node>]
+...............
+
+[[cmdhelp_node_fence,fence node]]
+==== `fence`
+
+Make the CRM fence a node. This functionality depends on stonith
+resources capable of fencing the specified node. If no such
+stonith resources exist, no fencing will happen.
+
+Usage:
+...............
+ fence <node>
+...............
+
+[[cmdhelp_node_clearstate,Clear node state]]
+==== `clearstate`
+
+Resets and clears the state of the specified node. This node is
+afterwards assumed clean and offline. This command can be used to
+manually confirm that a node has been fenced (e.g., powered off).
+
+Be careful! This can cause data corruption if you confirm that a node is
+down that is, in fact, not cleanly down - the cluster will proceed as if
+the fence had succeeded, possibly starting resources multiple times.
+
+Usage:
+...............
+ clearstate <node>
+...............
+
+[[cmdhelp_node_delete,delete node]]
+==== `delete`
+
+Delete a node. This command will remove the node from the CIB
+and, in case the cluster stack is running, use the appropriate
+program (`crm_node` or `hb_delnode`) to remove the node from the
+membership.
+
+If the node is still listed as active and a member of our
+partition, we refuse to remove it. With the global force option
+(`-F`), we will try to delete the node anyway.
+
+Usage:
+...............
+ delete <node>
+...............
+
+[[cmdhelp_node_attribute,manage attributes]]
+==== `attribute`
+
+Edit node attributes. This kind of attribute should refer to
+relatively static properties, such as memory size.
+
+Usage:
+...............
+ attribute <node> set <attr> <value>
+ attribute <node> delete <attr>
+ attribute <node> show <attr>
+...............
+Example:
+...............
+ attribute node_1 set memory_size 4096
+...............
+
+[[cmdhelp_node_utilization,manage utilization attributes]]
+==== `utilization`
+
+Edit node utilization attributes. These attributes describe
+hardware characteristics as integer numbers such as memory size
+or the number of CPUs. By setting the `placement-strategy`
+cluster property appropriately, it is possible then to distribute
+resources based on resource requirements and node size. See also
+<<cmdhelp_resource_utilization,resource utilization attributes>>.
+
+Usage:
+...............
+ utilization <node> set <attr> <value>
+ utilization <node> delete <attr>
+ utilization <node> show <attr>
+...............
+Examples:
+...............
+ utilization node_1 set memory 16384
+ utilization node_1 show cpu
+...............
+
+[[cmdhelp_node_status-attr,manage status attributes]]
+==== `status-attr`
+
+Edit node attributes which are in the CIB status section, i.e.
+attributes which hold properties of a more volatile nature. One
+typical example is the attribute generated by the `pingd` utility.
+
+Usage:
+...............
+ status-attr <node> set <attr> <value>
+ status-attr <node> delete <attr>
+ status-attr <node> show <attr>
+...............
+Example:
+...............
+ status-attr node_1 show pingd
+...............
+
+[[cmdhelp_site,site support]]
+=== `site`
+
+A cluster may consist of two or more subclusters in different and
+distant locations. This set of commands supports such setups.
+
+[[cmdhelp_site_ticket,manage site tickets]]
+==== `ticket`
+
+Tickets are cluster-wide attributes. They can be managed at the
+site where this command is executed.
+
+It is then possible to constrain resources depending on the
+ticket availability (see the <<cmdhelp_configure_rsc_ticket,`rsc_ticket`>> command
+for more details).
+
+Usage:
+...............
+ ticket {grant|revoke|standby|activate|show|time|delete} <ticket>
+...............
+Example:
+...............
+ ticket grant ticket1
+...............
+
+[[cmdhelp_options,user preferences]]
+=== `options`
+
+The user may set various options for the crm shell itself.
+
+[[cmdhelp_options_skill-level,set skill level]]
+==== `skill-level`
+
+Based on the skill-level setting, the user is allowed to use only
+a subset of commands. There are three levels: operator,
+administrator, and expert. The operator level allows only
+commands at the `resource` and `node` levels, but not editing
+or deleting resources. The administrator may do that and may also
+configure the cluster at the `configure` level and manage the
+shadow CIBs. The expert may do all.
+
+Usage:
+...............
+ skill-level <level>
+
+ level :: operator | administrator | expert
+...............
+
+.Note on security
+****************************
+The `skill-level` option is advisory only. There is nothing
+stopping users from changing their skill level (see
+<<topics_Security,Access Control Lists (ACL)>> on how to enforce
+access control).
+****************************
+
+[[cmdhelp_options_user,set the cluster user]]
+==== `user`
+
+Sufficient privileges are necessary in order to manage a
+cluster: programs such as `crm_verify` or `crm_resource` and,
+ultimately, `cibadmin` have to be run either as `root` or as the
+CRM owner user (typically `hacluster`). You don't have to worry
+about that if you run `crm` as `root`. A more secure way is to
+run the program with your usual privileges, set this option to
+the appropriate user (such as `hacluster`), and set up the
+`sudoers` file.
+
+Usage:
+...............
+ user system-user
+...............
+Example:
+...............
+ user hacluster
+...............
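+
+As an illustrative sketch only (the user name `joe` is
+hypothetical; consult `sudoers(5)` for the exact syntax on your
+system), a `sudoers` entry allowing that user to run commands as
+`hacluster` could look like this:
+...............
+ joe ALL = (hacluster) NOPASSWD: ALL
+...............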
+
+[[cmdhelp_options_editor,set preferred editor program]]
+==== `editor`
+
+The `edit` command invokes an editor. Use this to specify your
+preferred editor program. If not set, it will default to either
+the value of the `EDITOR` environment variable or to one of the
+standard UNIX editors (`vi`, `emacs`, `nano`).
+
+Usage:
+...............
+ editor program
+...............
+Example:
+...............
+ editor vim
+...............
+
+[[cmdhelp_options_pager,set preferred pager program]]
+==== `pager`
+
+The `view` command displays text through a pager. Use this to
+specify your preferred pager program. If not set, it will default
+to either the value of the `PAGER` environment variable or to one
+of the standard UNIX system pagers (`less`, `more`, `pg`).
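+
+Usage is analogous to the `editor` preference. For example:
+...............
+ pager less
+...............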
+
+[[cmdhelp_options_sort-elements,sort CIB elements]]
+==== `sort-elements`
+
+`crm` by default sorts CIB elements. If you want them to appear
+in the order they were created, set this option to `no`.
+
+Usage:
+...............
+ sort-elements {yes|no}
+...............
+Example:
+...............
+ sort-elements no
+...............
+
+[[cmdhelp_options_wait,synchronous operation]]
+==== `wait`
+
+In normal operation, `crm` runs a command and returns immediately
+to process other commands or get input from the user. With this
+option set to `yes`, it will wait for the started transition to
+finish. In interactive mode, dots are printed to indicate
+progress.
+
+Usage:
+...............
+ wait {yes|no}
+...............
+Example:
+...............
+ wait yes
+...............
+
+[[cmdhelp_options_output,set output type]]
+==== `output`
+
+`crm` can adorn configurations in two ways: in color (similar to,
+for instance, the `ls --color` command) and by showing keywords
+in upper case. Possible values are `plain`, `color`, and
+`uppercase`. It is possible to combine the latter two in order to
+get an upper-case Christmas tree. Just set this option to
+`color,uppercase`.
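+
+For instance, to combine both adornments:
+...............
+ output color,uppercase
+...............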
+
+[[cmdhelp_options_colorscheme,set colors for output]]
+==== `colorscheme`
+
+With `output` set to `color`, a comma-separated list of colors
+from this option is used to emphasize:
+
+- keywords
+- object ids
+- attribute names
+- attribute values
+- scores
+- resource references
+
+`crm` can show colors only if curses support for Python is
+installed (usually provided by the `python-curses` package). The
+colors are whatever is available in your terminal. Use `normal`
+if you want to keep the default foreground color.
+
+This user preference defaults to
+`yellow,normal,cyan,red,green,magenta` which is good for
+terminals with dark background. You may want to change the color
+scheme and save it in the preferences file for other color
+setups.
+
+Example:
+...............
+ colorscheme yellow,normal,blue,red,green,magenta
+...............
+
+[[cmdhelp_options_check-frequency,when to perform semantic check]]
+==== `check-frequency`
+
+A semantic check of the CIB, or of the elements modified or
+created, may be done on every configuration change (`always`),
+when verifying (`on-verify`), or `never`. It is by default set to
+`always`. Experts may want to change the setting to `on-verify`.
+
+The checks require that resource agents are present. If they are
+not installed at configuration time, set this preference to
+`never`.
+
+See <<topics_Checks,Configuration semantic checks>> for more details.
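+
+For example, to defer the checks until verification:
+...............
+ check-frequency on-verify
+...............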
+
+[[cmdhelp_options_check-mode,how to treat semantic errors]]
+==== `check-mode`
+
+The semantic check of the CIB, or of the elements modified or
+created, may be done in `strict` mode or in `relaxed` mode. In
+the former, certain problems are treated as configuration errors;
+in `relaxed` mode, all are treated as warnings. The default is
+`strict`.
+
+See <<topics_Checks,Configuration semantic checks>> for more details.
+
+[[cmdhelp_options_add-quotes,add quotes around parameters containing spaces]]
+==== `add-quotes`
+
+The shell (as in `/bin/sh`) parser strips quotes from the command
+line. This may sometimes make it really difficult to type values
+which contain white space. One typical example is the `configure
+filter` command. The crm shell will supply extra quotes around
+arguments which contain white space. The default is `yes`.
+
+.Note on quotes use
+****************************
+Automatic quoting of arguments was introduced in version 1.2.2
+and is technically a regression. Being a regression is the only
+reason the `add-quotes` option exists. If you have custom shell
+scripts which would break, just set the `add-quotes` option to
+`no`.
+
+For instance, with adding quotes enabled, it is possible to do
+the following:
+...............
+ # crm configure primitive d1 ocf:heartbeat:Dummy meta description="some description here"
+ # crm configure filter 'sed "s/hostlist=./&node-c /"' fencing
+...............
+****************************
+
+[[cmdhelp_options_manage-children,how to handle children resource attributes]]
+==== `manage-children`
+
+Some resource management commands, such as `resource stop`, may
+not always produce the desired result when the target resource is
+a group. Each element, the group and its primitive members, can
+have the same meta attribute, and those attributes may end up
+with conflicting values. Consider the following construct:
+...............
+ crm(live)# configure show svc fs virtual-ip
+ primitive fs ocf:heartbeat:Filesystem \
+ params device="/dev/drbd0" directory="/srv/nfs" fstype="ext3" \
+ op monitor interval="10s" \
+ meta target-role="Started"
+ primitive virtual-ip ocf:heartbeat:IPaddr2 \
+ params ip="10.2.13.110" iflabel="1" \
+ op monitor interval="10s" \
+ op start interval="0" \
+ meta target-role="Started"
+ group svc fs virtual-ip \
+ meta target-role="Stopped"
+...............
+
+Even though the element `svc` should be stopped, the group is
+actually running because all its members have the `target-role`
+set to `Started`:
+...............
+ crm(live)# resource show svc
+ resource svc is running on: xen-f
+...............
+
+Hence, if the user invokes `resource stop svc`, the intention is
+not clear. This preference gives the user an opportunity to
+better control what happens if attributes of group members have
+values which are in conflict with the same attribute of the group
+itself.
+
+Possible values are `ask` (the default), `always`, and `never`.
+If set to `always`, the crm shell removes all children attributes
+which have values different from the parent. If set to `never`,
+all children attributes are left intact. Finally, if set to
+`ask`, the user will be asked for each member what is to be done.
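+
+For example, to always remove conflicting children attributes
+without asking:
+...............
+ manage-children always
+...............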
+
+[[cmdhelp_options_show,show current user preference]]
+==== `show`
+
+Display all current settings.
+
+[[cmdhelp_options_save,save the user preferences to the rc file]]
+==== `save`
+
+Save current settings to the rc file (`$HOME/.config/crm/rc`). On
+further `crm` runs, the rc file is automatically read and parsed.
+
+[[cmdhelp_options_reset,reset user preferences to factory defaults]]
+==== `reset`
+
+This command resets all user options to the defaults. If used as
+a single-shot command, the rc file (`$HOME/.config/crm/rc`) is
+reset to the defaults too.
+
+[[cmdhelp_configure,CIB configuration]]
+=== `configure`
+
+This level enables all CIB object definition commands.
+
+The configuration may be logically divided into four parts:
+nodes, resources, constraints, and (cluster) properties and
+attributes. Each of these commands supports one or more basic CIB
+objects.
+
+Nodes and attributes describing nodes are managed using the
+`node` command.
+
+Commands for resources are:
+
+- `primitive`
+- `monitor`
+- `group`
+- `clone`
+- `ms`/`master` (master-slave)
+
+In order to streamline large configurations, it is possible to
+define a template which can later be referenced in primitives:
+
+- `rsc_template`
+
+In that case the primitive inherits all attributes defined in the
+template.
+
+There are three types of constraints:
+
+- `location`
+- `colocation`
+- `order`
+
+It is possible to define fencing order (stonith resource
+priorities):
+
+- `fencing_topology`
+
+Finally, there are the cluster properties, resource meta
+attributes defaults, and operations defaults. All are just a set
+of attributes. These attributes are managed by the following
+commands:
+
+- `property`
+- `rsc_defaults`
+- `op_defaults`
+
+In addition to the cluster configuration, the Access Control
+Lists (ACL) can be setup to allow access to parts of the CIB for
+users other than `root` and `hacluster`. The following commands
+manage ACL:
+
+- `user`
+- `role`
+
+The changes are applied to the current CIB only on ending the
+configuration session or using the `commit` command.
+
+Comments start with `#` in the first line. The comments are tied
+to the element which follows. If the element moves, its comments
+will follow.
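+
+For illustration, a comment placed before an element is attached
+to that element:
+...............
+ # d1 is just a test resource
+ primitive d1 ocf:heartbeat:Dummy
+...............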
+
+[[cmdhelp_configure_node,define a cluster node]]
+==== `node`
+
+The node command describes a cluster node. Nodes in the CIB are
+commonly created automatically by the CRM. Hence, you should not
+need to deal with nodes unless you also want to define node
+attributes. Note that it is also possible to manage node
+attributes at the `node` level.
+
+Usage:
+...............
+ node <uname>[:<type>]
+ [attributes <param>=<value> [<param>=<value>...]]
+ [utilization <param>=<value> [<param>=<value>...]]
+
+ type :: normal | member | ping
+...............
+Example:
+...............
+ node node1
+ node big_node attributes memory=64
+...............
+
+[[cmdhelp_configure_primitive,define a resource]]
+==== `primitive`
+
+The primitive command describes a resource. It may be referenced
+only once in group, clone, or master-slave objects. If it's not
+referenced, then it is placed as a single resource in the CIB.
+
+Operations may be specified in three ways. "Anonymously", as a
+simple list of `op` specifications; use that if you don't want to
+reference the set of operations elsewhere, as it is by far the
+most common way to define operations. Named, using the
+`operations` keyword along with `$id` to give the operation set a
+name. Or by reference, using `$id-ref` to reference another set
+of operations.
+
+Operation attributes which are not recognized are saved as
+instance attributes of that operation. A typical example is
+`OCF_CHECK_LEVEL`.
+
+For multistate resources, roles are specified as `role=<role>`.
+
+A template may be defined for resources which are of the same
+type and which share most of the configuration. See
+<<cmdhelp_configure_rsc_template,`rsc_template`>> for more information.
+
+Usage:
+...............
+ primitive <rsc> {[<class>:[<provider>:]]<type>|@<template>}
+ [params attr_list]
+ [meta attr_list]
+ [utilization attr_list]
+ [operations id_spec]
+ [op op_type [<attribute>=<value>...] ...]
+
+ attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+ id_spec :: $id=<id> | $id-ref=<id>
+ op_type :: start | stop | monitor
+...............
+Example:
+...............
+ primitive apcfence stonith:apcsmart \
+ params ttydev=/dev/ttyS0 hostlist="node1 node2" \
+ op start timeout=60s \
+ op monitor interval=30m timeout=60s
+
+ primitive www8 apache \
+ params configfile=/etc/apache/www8.conf \
+ operations $id-ref=apache_ops
+
+ primitive db0 mysql \
+ params config=/etc/mysql/db0.conf \
+ op monitor interval=60s \
+ op monitor interval=300s OCF_CHECK_LEVEL=10
+
+ primitive r0 ocf:linbit:drbd \
+ params drbd_resource=r0 \
+ op monitor role=Master interval=60s \
+ op monitor role=Slave interval=300s
+
+ primitive xen0 @vm_scheme1 \
+ params xmfile=/etc/xen/vm/xen0
+...............
+
+[[cmdhelp_configure_monitor,add monitor operation to a primitive]]
+==== `monitor`
+
+Monitor is by far the most common operation. It is possible to
+add it without editing the whole resource, which also keeps long
+primitive definitions a bit less cluttered. In order to make this
+command as concise as possible, less common operation attributes
+are not available. If you need them, then use the `op` part of
+the `primitive` command.
+
+Usage:
+...............
+ monitor <rsc>[:<role>] <interval>[:<timeout>]
+...............
+Example:
+...............
+ monitor apcfence 60m:60s
+...............
+
+Note that after executing the command, the monitor operation may
+be shown as part of the primitive definition.
+
+[[cmdhelp_configure_group,define a group]]
+==== `group`
+
+The `group` command creates a group of resources. This can be useful
+when resources depend on other resources and require that those
+resources start in order on the same node. A common use of resource
+groups is to ensure that a server and a virtual IP are located
+together, and that the virtual IP is started before the server.
+
+Grouped resources are started in the order they appear in the group,
+and stopped in the reverse order. If a resource in the group cannot
+run anywhere, resources following it in the group will not start.
+
+`group` can be passed the "container" meta attribute, to indicate that
+it is to be used to group VM resources monitored using Nagios. The
+resource referred to by the container attribute must be of type
+`ocf:heartbeat:Xen`, `ocf:heartbeat:VirtualDomain` or `ocf:heartbeat:lxc`.
+
+Usage:
+...............
+ group <name> <rsc> [<rsc>...]
+ [meta attr_list]
+ [params attr_list]
+
+ attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+...............
+Example:
+...............
+ group internal_www disk0 fs0 internal_ip apache \
+ meta target_role=stopped
+
+ group vm-and-services vm vm-sshd meta container="vm"
+...............
+
+[[cmdhelp_configure_clone,define a clone]]
+==== `clone`
+
+The `clone` command creates a resource clone. It may contain a
+single primitive resource or one group of resources.
+
+Usage:
+...............
+ clone <name> <rsc>
+ [meta attr_list]
+ [params attr_list]
+
+ attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+...............
+Example:
+...............
+ clone cl_fence apc_1 \
+ meta clone-node-max=1 globally-unique=false
+...............
+
+[[cmdhelp_configure_ms,define a master-slave resource]]
+==== `ms` (`master`)
+
+The `ms` command creates a master/slave resource type. It may contain a
+single primitive resource or one group of resources.
+
+Usage:
+...............
+ ms <name> <rsc>
+ [meta attr_list]
+ [params attr_list]
+
+ attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+...............
+Example:
+...............
+ ms disk1 drbd1 \
+ meta notify=true globally-unique=false
+...............
+
+.Note on `id-ref` usage
+****************************
+Instance or meta attributes (`params` and `meta`) may contain
+a reference to another set of attributes. In that case, no other
+attributes are allowed. Since attribute sets' ids, though they do
+exist, are not shown by `crm`, it is also possible to
+reference an object instead of an attribute set. `crm` will
+automatically replace such a reference with the right id:
+
+...............
+ crm(live)configure# primitive a2 www-2 meta $id-ref=a1
+ crm(live)configure# show a2
+ primitive a2 ocf:heartbeat:apache \
+ meta $id-ref="a1-meta_attributes"
+ [...]
+...............
+It is advisable to give meaningful names to attribute sets which
+are going to be referenced.
+****************************
+
+[[cmdhelp_configure_rsc_template,define a resource template]]
+==== `rsc_template`
+
+The `rsc_template` command creates a resource template. It may be
+referenced in primitives. It is used to reduce large
+configurations with many similar resources.
+
+Usage:
+...............
+ rsc_template <name> [<class>:[<provider>:]]<type>
+ [params attr_list]
+ [meta attr_list]
+ [utilization attr_list]
+ [operations id_spec]
+ [op op_type [<attribute>=<value>...] ...]
+
+ attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+ id_spec :: $id=<id> | $id-ref=<id>
+ op_type :: start | stop | monitor
+...............
+Example:
+...............
+ rsc_template public_vm ocf:heartbeat:Xen \
+ op start timeout=300s \
+ op stop timeout=300s \
+ op monitor interval=30s timeout=60s \
+ op migrate_from timeout=600s \
+ op migrate_to timeout=600s
+ primitive xen0 @public_vm \
+ params xmfile=/etc/xen/xen0
+ primitive xen1 @public_vm \
+ params xmfile=/etc/xen/xen1
+...............
+
+[[cmdhelp_configure_location,a location preference]]
+==== `location`
+
+`location` defines the preference of nodes for the given
+resource. A location constraint consists of one or more rules,
+each specifying a score to be awarded if the rule matches.
+
+Usage:
+...............
+ location <id> <rsc> {node_pref|rules}
+
+ node_pref :: <score>: <node>
+
+ rules ::
+ rule [id_spec] [$role=<role>] <score>: <expression>
+ [rule [id_spec] [$role=<role>] <score>: <expression> ...]
+
+ id_spec :: $id=<id> | $id-ref=<id>
+ score :: <number> | <attribute> | [-]inf
+ expression :: <simple_exp> [bool_op <simple_exp> ...]
+ bool_op :: or | and
+ simple_exp :: <attribute> [type:]<binary_op> <value>
+ | <unary_op> <attribute>
+ | date <date_expr>
+ type :: string | version | number
+ binary_op :: lt | gt | lte | gte | eq | ne
+ unary_op :: defined | not_defined
+
+ date_expr :: lt <end>
+ | gt <start>
+ | in_range start=<start> end=<end>
+ | in_range start=<start> <duration>
+ | date_spec <date_spec>
+ duration|date_spec ::
+ hours=<value>
+ | monthdays=<value>
+ | weekdays=<value>
+ | yeardays=<value>
+ | months=<value>
+ | weeks=<value>
+ | years=<value>
+ | weekyears=<value>
+ | moon=<value>
+...............
+Examples:
+...............
+ location conn_1 internal_www 100: node1
+
+ location conn_1 internal_www \
+ rule 50: #uname eq node1 \
+ rule pingd: defined pingd
+
+ location conn_2 dummy_float \
+ rule -inf: not_defined pingd or pingd number:lte 0
+...............
+
+[[cmdhelp_configure_colocation,colocate resources]]
+==== `colocation` (`collocation`)
+
+This constraint expresses the placement relation between two
+or more resources. If there are more than two resources, then the
+constraint is called a resource set.
+
+The score is used to indicate the priority of the constraint. A
+positive score indicates that the resources should run on the same
+node. A negative score indicates that they should not run on the
+same node. Values of positive or negative `infinity` indicate a
+mandatory constraint.
+
+In the two resource form, the cluster will place `<with-rsc>` first,
+and then decide where to put the `<rsc>` resource.
+
+Collocation resource sets have an extra attribute (`sequential`)
+to allow for sets of resources which don't depend on each other
+in terms of state. The shell syntax for such sets is to put
+resources in parentheses.
+
+Sets cannot be nested.
+
+The optional `node-attribute` references an attribute in nodes'
+instance attributes.
+
+Usage:
+...............
+ colocation <id> <score>: <rsc>[:<role>] <with-rsc>[:<role>]
+ [node-attribute=<node_attr>]
+
+ colocation <id> <score>: <rsc>[:<role>] <rsc>[:<role>] ...
+ [node-attribute=<node_attr>]
+...............
+Example:
+...............
+ colocation never_put_apache_with_dummy -inf: apache dummy
+ colocation c1 inf: A ( B C )
+...............
+
+[[cmdhelp_configure_order,order resources]]
+==== `order`
+
+This constraint expresses the order of actions on two or more
+resources. If there are more than two resources, then the
+constraint is called a resource set.
+
+Ordered resource sets have an extra attribute to allow for sets
+of resources whose actions may run in parallel. The shell syntax
+for such sets is to put resources in parentheses.
+
+If the subsequent resource can start or promote after any one of the
+resources in a set has done so, enclose the set in brackets (`[` and `]`).
+
+Sets cannot be nested.
+
+Three strings are reserved to specify a kind of order constraint:
+`Mandatory`, `Optional`, and `Serialize`. It is preferred to use
+one of these settings instead of a score. Previous versions mapped
+scores `0` and `inf` to keywords `advisory` and `mandatory`.
+That is still valid but deprecated.
+
+.Note on resource sets' XML attributes
+****************************
+The XML attribute `require-all` controls whether all resources in
+a set are, well, required. The bracketed sets actually have this
+attribute as well as `sequential` set to `false`. If you need a
+different combination, for whatever reason, just set one of the
+attributes within the set. Something like this:
+
+...............
+ crm(live)configure# order o1 Mandatory: [ A B sequential=true ] C
+...............
+It is up to you to find out whether such a combination makes
+sense.
+****************************
+
+Usage:
+...............
+ order <id> {kind|<score>}: <rsc>[:<action>] <rsc>[:<action>] ...
+ [symmetrical=<bool>]
+
+ kind :: Mandatory | Optional | Serialize
+...............
+Example:
+...............
+ order c_apache_1 Mandatory: apache:start ip_1
+ order o1 Serialize: A ( B C )
+ order order_2 Mandatory: [ A B ] C
+...............
+
+[[cmdhelp_configure_rsc_ticket,resources ticket dependency]]
+==== `rsc_ticket`
+
+This constraint expresses dependency of resources on cluster-wide
+attributes, also known as tickets. Tickets are mainly used in
+geo-clusters, which consist of multiple sites. A ticket may be
+granted to a site, thus allowing resources to run there.
+
+The `loss-policy` attribute specifies what happens to the
+resource (or resources) if the ticket is revoked. The default is
+either `stop` or `demote` depending on whether a resource is
+multi-state.
+
+See also the <<cmdhelp_site_ticket,`site`>> set of commands.
+
+Usage:
+...............
+ rsc_ticket <id> <ticket_id>: <rsc>[:<role>] [<rsc>[:<role>] ...]
+ [loss-policy=<loss_policy_action>]
+
+ loss_policy_action :: stop | demote | fence | freeze
+...............
+Example:
+...............
+ rsc_ticket ticket-A_public-ip ticket-A: public-ip
+ rsc_ticket ticket-A_bigdb ticket-A: bigdb loss-policy=fence
+ rsc_ticket ticket-B_storage ticket-B: drbd-a:Master drbd-b:Master
+...............
+
+
+[[cmdhelp_configure_property,set a cluster property]]
+==== `property`
+
+Set the cluster (`crm_config`) options.
+
+Usage:
+...............
+ property [$id=<set_id>] <option>=<value> [<option>=<value> ...]
+...............
+Example:
+...............
+ property stonith-enabled=true
+...............
+
+[[cmdhelp_configure_rsc_defaults,set resource defaults]]
+==== `rsc_defaults`
+
+Set defaults for the resource meta attributes.
+
+Usage:
+...............
+ rsc_defaults [$id=<set_id>] <option>=<value> [<option>=<value> ...]
+...............
+Example:
+...............
+ rsc_defaults failure-timeout=3m
+...............
+
+[[cmdhelp_configure_fencing_topology,node fencing order]]
+==== `fencing_topology`
+
+If multiple fencing (stonith) devices capable of fencing a node
+are available, their order may be specified by `fencing_topology`.
+The order is specified per node.
+
+Stonith resources can be separated by `,`, in which case all of
+them need to succeed. If they fail, the next stonith resource (or
+set of resources) is used. In other words, use a comma to separate
+resources which all need to succeed and whitespace for serial
+order. Whitespace around the comma is not allowed.
+
+If the node is left out, the order is used for all nodes.
+That should reduce the configuration size in some stonith setups.
+
+Usage:
+...............
+ fencing_topology stonith_resources [stonith_resources ...]
+ fencing_topology fencing_order [fencing_order ...]
+
+ fencing_order :: <node>: stonith_resources [stonith_resources ...]
+
+ stonith_resources :: <rsc>[,<rsc>...]
+...............
+Example:
+...............
+ fencing_topology poison-pill power
+ fencing_topology \
+ node-a: poison-pill power \
+ node-b: ipmi serial
+...............
+
+[[cmdhelp_configure_role,define role access rights]]
+==== `role`
+
+An ACL role is a set of rules which describe access rights to the
+CIB. Rules consist of an access right `read`, `write`, or `deny`
+and a specification denoting part of the configuration to which
+the access right applies. The specification can be an XPath or a
+combination of tag and id references. If an attribute is
+appended, then the specification applies only to that attribute
+of the matching element.
+
+There are a number of shortcuts for XPath specifications. The
+`meta`, `params`, and `utilization` shortcuts reference resource
+meta attributes, parameters, and utilization respectively. The
+`location` shortcut may be used to specify location constraints,
+most of the time to allow the resource `move` and `unmove`
+commands. The `property` shortcut references cluster properties.
+The `node` shortcut allows reading node attributes. `nodeattr`
+and `nodeutil` reference node attributes and node capacity
+(utilization). The `status` shortcut references the whole status
+section of the CIB. Read access to status is necessary for
+various monitoring tools such as `crm_mon(8)` (aka `crm status`).
+
+Usage:
+...............
+ role <role-id> rule [rule ...]
+
+ rule :: acl-right cib-spec [attribute:<attribute>]
+
+ acl-right :: read | write | deny
+
+ cib-spec :: xpath-spec | tag-ref-spec
+ xpath-spec :: xpath:<xpath> | shortcut
+ tag-ref-spec :: tag:<tag> | ref:<id> | tag:<tag> ref:<id>
+
+ shortcut :: meta:<rsc>[:<attr>]
+ params:<rsc>[:<attr>]
+ utilization:<rsc>
+ location:<rsc>
+ property[:<attr>]
+ node[:<node>]
+ nodeattr[:<attr>]
+ nodeutil[:<node>]
+ status
+...............
+Example:
+...............
+ role app1_admin \
+ write meta:app1:target-role \
+ write meta:app1:is-managed \
+ write location:app1 \
+ read ref:app1
+...............
+
+[[cmdhelp_configure_user,define user access rights]]
+==== `user`
+
+Users who normally cannot view or manage the cluster configuration
+can be allowed access to parts of the CIB. The access is defined
+by a set of `read`, `write`, and `deny` rules as in role
+definitions or by referencing roles. The latter is considered
+best practice.
+
+Usage:
+...............
+ user <uid> {roles|rules}
+
+ roles :: role:<role-ref> [role:<role-ref> ...]
+ rules :: rule [rule ...]
+...............
+Example:
+...............
+ user joe \
+ role:app1_admin \
+ role:read_all
+...............
+
+[[cmdhelp_configure_op_defaults,set resource operations defaults]]
+==== `op_defaults`
+
+Set defaults for the operations meta attributes.
+
+Usage:
+...............
+ op_defaults [$id=<set_id>] <option>=<value> [<option>=<value> ...]
+...............
+Example:
+...............
+ op_defaults record-pending=true
+...............
+
+[[cmdhelp_configure_schema,set or display current CIB RNG schema]]
+==== `schema`
+
+The CIB's content is validated by an RNG schema. Pacemaker
+supports several, depending on its version. Currently supported
+schemas are `pacemaker-1.0`, `pacemaker-1.1`, and `pacemaker-1.2`.
+
+Use this command to display or switch to another RNG schema.
+
+Usage:
+...............
+ schema [<schema>]
+...............
+Example:
+...............
+ schema pacemaker-1.1
+...............
+
+[[cmdhelp_configure_show,display CIB objects]]
+==== `show`
+
+The `show` command displays objects. It may display all objects
+or a set of objects. The user may also choose to see only objects
+which were changed.
+Optionally, the XML code may be displayed instead of the CLI
+representation.
+
+Usage:
+...............
+ show [xml] [<id> ...]
+ show [xml] changed
+...............
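+
+For example, to display only the objects changed in this session,
+or a single object (id illustrative) as XML:
+...............
+ show changed
+ show xml virtual-ip
+...............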
+
+[[cmdhelp_configure_edit,edit CIB objects]]
+==== `edit`
+
+This command invokes the editor with the object description. As
+with the `show` command, the user may choose to edit all objects
+or a set of objects.
+
+If the user insists, he or she may edit the XML version of the
+object. If you do that, don't modify any id attributes.
+
+Usage:
+...............
+ edit [xml] [<id> ...]
+ edit [xml] changed
+...............
+
+.Note on renaming element ids
+****************************
+The edit command sometimes cannot properly handle modifying
+element ids, in particular for elements which belong to group or
+ms resources. Group and ms resources themselves also cannot be
+renamed. Please use the `rename` command instead.
+****************************
+
+[[cmdhelp_configure_filter,filter CIB objects]]
+==== `filter`
+
+This command filters the given CIB elements through an external
+program. The program should accept input on `stdin` and send
+output to `stdout` (the standard UNIX filter conventions). As
+with the `show` command, the user may choose to filter all or
+just a subset of elements.
+
+It is possible to filter the XML representation of objects, but
+that is probably not as useful as filtering the configuration
+language. The presentation is somewhat different from what would
+be displayed by the `show` command---each element is shown on a
+single line, i.e. there are no backslashes and no other
+embellishments.
+
+Don't forget to put quotes around the filter if it contains
+spaces.
+
+Usage:
+...............
+ filter <prog> [xml] [<id> ...]
+ filter <prog> [xml] changed
+...............
+Examples:
+...............
+ filter "sed '/^primitive/s/target-role=[^ ]*//'"
+ # crm configure filter "sed '/^primitive/s/target-role=[^ ]*//'"
+...............
+
+[[cmdhelp_configure_delete,delete CIB objects]]
+==== `delete`
+
+Delete one or more objects. If an object to be deleted belongs to
+a container object, such as a group, and it is the only resource
+in that container, then the container is deleted as well, along
+with any related constraints.
+
+Usage:
+...............
+ delete <id> [<id>...]
+...............
+
+[[cmdhelp_configure_default-timeouts,set timeouts for operations to minimums from the meta-data]]
+==== `default-timeouts`
+
+This command takes the timeouts from the actions section of the
+resource agent meta-data and sets them for the operations of the
+primitive.
+
+Usage:
+...............
+ default-timeouts <id> [<id>...]
+...............
+
+.Note on `default-timeouts`
+****************************
+You may be happy using this, but your applications may not. And
+it will tell you so at the worst possible moment. You have been
+warned.
+****************************
+
+[[cmdhelp_configure_rename,rename a CIB object]]
+==== `rename`
+
+Rename an object. It is recommended to use this command to rename
+a resource, because it will take care of updating all related
+constraints and the parent resource. Changing ids with the edit
+command won't have the same effect.
+
+If you want to rename a resource, it must be in the stopped state.
+
+Usage:
+...............
+ rename <old_id> <new_id>
+...............
+
+[[cmdhelp_configure_modgroup,modify group]]
+==== `modgroup`
+
+Add or remove primitives in a group. The `add` subcommand appends
+the new group member by default. Should it go elsewhere, there
+are `after` and `before` clauses.
+
+Usage:
+...............
+ modgroup <id> add <id> [after <id>|before <id>]
+ modgroup <id> remove <id>
+...............
+Examples:
+...............
+ modgroup share1 add storage2 before share1-fs
+...............
+
+[[cmdhelp_configure_refresh,refresh from CIB]]
+==== `refresh`
+
+Refresh the internal structures from the CIB. All changes made
+during this session are lost.
+
+Usage:
+...............
+ refresh
+...............
+
+[[cmdhelp_configure_erase,erase the CIB]]
+==== `erase`
+
+The `erase` command clears the entire configuration, apart from
+nodes. To remove nodes as well, you have to specify the
+additional keyword `nodes`.
+
+Note that removing nodes from the live cluster may have some
+strange/interesting/unwelcome effects.
+
+Usage:
+...............
+ erase [nodes]
+...............
+
+[[cmdhelp_configure_ptest,show cluster actions if changes were committed]]
+==== `ptest` (`simulate`)
+
+Show PE (Policy Engine) motions using `ptest(8)` or
+`crm_simulate(8)`.
+
+A CIB is constructed using the current user-edited configuration
+and the status from the running CIB. The resulting CIB is run
+through `ptest` (or `crm_simulate`) to show the changes which
+would happen if the configuration were committed.
+
+The status section may be loaded from another source and modified
+using the <<cmdhelp_cibstatus,`cibstatus`>> level commands. In that case, the
+`ptest` command will issue a message informing the user that the
+Policy Engine graph is not calculated based on the current status
+section and therefore won't show what would happen to the running
+cluster, but to some imaginary one.
+
+If you have graphviz installed and an X11 session, `dotty(1)` is
+run to display the changes graphically.
+
+Add a string of `v` characters to increase verbosity. `ptest`
+can also show allocation scores. `utilization` turns on
+information about the remaining capacity of nodes. With the
+`actions` option, `ptest` will print all resource actions.
+
+The `ptest` program has been replaced by `crm_simulate` in newer
+Pacemaker versions. In some installations both may be installed.
+Use `simulate` to enforce using `crm_simulate`.
+
+Usage:
+...............
+ ptest [nograph] [v...] [scores] [actions] [utilization]
+...............
+Examples:
+...............
+ ptest scores
+ ptest vvvvv
+ simulate actions
+...............
+
+[[cmdhelp_configure_rsctest,test resources as currently configured]]
+==== `rsctest`
+
+Test resources with current resource configuration. If no nodes
+are specified, tests are run on all known nodes.
+
+The order of resources is significant: it is assumed that later
+resources depend on earlier ones.
+
+If a resource is multi-state, it is assumed that the role on
+which later resources depend is master.
+
+Tests are run sequentially to prevent running the same resource
+on two or more nodes. Tests are carried out only if none of the
+specified nodes currently run any of the specified resources.
+However, it won't verify whether resources run on the other
+nodes.
+
+Superuser privileges are obviously required: either run this as
+root or set up the `sudoers` file appropriately.
+
+Note that resource testing may take some time.
+
+Usage:
+...............
+ rsctest <rsc_id> [<rsc_id> ...] [<node_id> ...]
+...............
+Examples:
+...............
+ rsctest my_ip websvc
+ rsctest websvc nodeB
+...............
+
+[[cmdhelp_configure_cib,CIB shadow management]]
+==== `cib` (shadow CIBs)
+
+This level is for management of shadow CIBs. It is available at
+the `configure` level to enable saving intermediate changes to a
+shadow CIB instead of to the live cluster. This short excerpt
+shows how:
+...............
+ crm(live)configure# cib new test-2
+ INFO: test-2 shadow CIB created
+ crm(test-2)configure# commit
+...............
+Note how the current CIB in the prompt changed from `live` to
+`test-2` after issuing the `cib new` command. See also the
+<<cmdhelp_cib,CIB shadow management>> section for more information.
+
+[[cmdhelp_configure_cibstatus,CIB status management and editing]]
+==== `cibstatus`
+
+Enter the level for editing and managing the CIB status section.
+See the <<cmdhelp_cibstatus,CIB status management section>>.
+
+[[cmdhelp_configure_template,edit and import a configuration from a template]]
+==== `template`
+
+The specified template is loaded into the editor. It's up to the
+user to make a good CRM configuration out of it. See also the
+<<cmdhelp_template,template section>>.
+
+Usage:
+...............
+ template [xml] url
+...............
+Example:
+...............
+ template two-apaches.txt
+...............
+
+[[cmdhelp_configure_commit,commit the changes to the CIB]]
+==== `commit`
+
+Commit the current configuration to the CIB in use. As noted
+elsewhere, commands in a configure session don't have immediate
+effect on the CIB. All changes are applied at one point in time,
+either using `commit` or when the user leaves the configure
+level. In case the CIB in use changed in the meantime, presumably
+by somebody else, the crm shell will refuse to apply the changes.
+If you know that it's fine to still apply them, add `force`.
+
+Usage:
+...............
+ commit [force]
+...............
+
+[[cmdhelp_configure_verify,verify the CIB with crm_verify]]
+==== `verify`
+
+Verify the contents of the CIB which would be committed.
+
+Usage:
+...............
+ verify
+...............
+
+[[cmdhelp_configure_upgrade,upgrade the CIB to version 1.0]]
+==== `upgrade`
+
+If you get the `CIB not supported` error, which typically means
+that the current CIB version is coming from an older release, you
+may try to upgrade it to the latest revision. The command to
+perform the upgrade is:
+...............
+ # cibadmin --upgrade --force
+...............
+
+If we don't recognize the current CIB as the old one, but you're
+sure that it is, you may force the command.
+
+Usage:
+...............
+ upgrade [force]
+...............
+
+[[cmdhelp_configure_save,save the CIB to a file]]
+==== `save`
+
+Save the current configuration to a file. Optionally, as XML. Use
+`-` instead of a file name to write the output to `stdout`.
+
+Usage:
+...............
+ save [xml] <file>
+...............
+Example:
+...............
+ save myfirstcib.txt
+...............
+
+[[cmdhelp_configure_load,import the CIB from a file]]
+==== `load`
+
+Load a part of the configuration (or all of it) from a local
+file or a network URL. The `replace` method replaces the current
+configuration with the one from the source. The `update` method
+tries to import the contents into the current configuration.
+The file may be a CLI file or an XML file.
+
+Usage:
+...............
+ load [xml] <method> URL
+
+ method :: replace | update
+...............
+Example:
+...............
+ load xml update myfirstcib.xml
+ load xml replace http://storage.big.com/cibs/bigcib.xml
+...............
+
+[[cmdhelp_configure_graph,generate a directed graph]]
+==== `graph`
+
+Create a graphviz graphical layout from the current cluster
+configuration.
+
+Currently, only `dot` (directed graph) is supported. It is
+essentially a visualization of resource ordering.
+
+The graph may be saved to a file which can be used as source for
+various graphviz tools (by default it is displayed in the user's
+X11 session). Optionally, by specifying the format, one can also
+produce an image instead.
+
+For more or different graphviz attributes, it is possible to save
+the default set of attributes to an ini file. If this file exists,
+it will always override the built-in settings. The `exportsettings`
+subcommand also prints the location of the ini file.
+
+Usage:
+...............
+ graph [<gtype> [<file> [<img_format>]]]
+ graph exportsettings
+
+ gtype :: dot
+ img_format :: `dot` output format (see the `-T` option)
+...............
+Example:
+...............
+ graph dot
+ graph dot clu1.conf.dot
+ graph dot clu1.conf.svg svg
+...............
+
+[[cmdhelp_configure_xml,raw xml]]
+==== `xml`
+
+Even though we promised no XML, it may happen, though hopefully
+very seldom, that an element from the CIB cannot be rendered in
+the configuration language. In that case, the element will be
+shown as raw XML, prefixed by this command. That element can then
+be edited like any other. If, after the change, the shell can
+digest the element, it will be converted back into the normal
+configuration language. Otherwise, there is no need to use `xml`
+for configuration.
+
+Usage:
+...............
+ xml <xml>
+...............
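+A hypothetical illustration, wrapping a raw +node+ element that
+the shell could not render (the id and uname values are made up):
+...............
+ xml <node id="ab5c1f82" uname="node1" type="normal"/>
+...............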
+
+[[cmdhelp_template,edit and import a configuration from a template]]
+=== `template`
+
+The user may be assisted in the cluster configuration by
+templates prepared in advance. Templates contain ready-made
+typical configurations which may be edited to suit particular
+user needs.
+
+This command enters a template level where additional commands
+for configuration/template management are available.
+
+[[cmdhelp_template_new,create a new configuration from templates]]
+==== `new`
+
+Create a new configuration from one or more templates. Note that
+configurations and templates are kept in different places, so it
+is possible for a configuration to have the same name as a template.
+
+If you already know which parameters are required, you can set
+them directly on the command line.
+
+The parameter name `id` is set by default to the name of the
+configuration.
+
+Usage:
+...............
+ new <config> <template> [<template> ...] [params name=value ...]
+...............
+Examples:
+...............
+ new vip virtual-ip
+ new bigfs ocfs2 params device=/dev/sdx8 directory=/bigfs
+...............
+
+[[cmdhelp_template_load,load a configuration]]
+==== `load`
+
+Load an existing configuration. Further `edit`, `show`, and
+`apply` commands will refer to this configuration.
+
+Usage:
+...............
+ load <config>
+...............
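+For instance, to load a previously created configuration named
++web+ (a hypothetical configuration name):
+...............
+ load web
+...............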
+
+[[cmdhelp_template_edit,edit a configuration]]
+==== `edit`
+
+Edit the current or given configuration using your favourite editor.
+
+Usage:
+...............
+ edit [<config>]
+...............
+
+[[cmdhelp_template_delete,delete a configuration]]
+==== `delete`
+
+Remove a configuration. The loaded (active) configuration can
+only be removed by adding `force`.
+
+Usage:
+...............
+ delete <config> [force]
+...............
+
+[[cmdhelp_template_list,list configurations/templates]]
+==== `list`
+
+List existing configurations or templates.
+
+Usage:
+...............
+ list [templates]
+...............
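+For instance, to list the templates shipped with the system
+rather than your own configurations:
+...............
+ list templates
+...............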
+
+[[cmdhelp_template_apply,process and apply the current configuration to the current CIB]]
+==== `apply`
+
+Copy the current or given configuration to the current CIB. By
+default the CIB is replaced; set the method to `update` to merge
+the configuration into the current CIB instead.
+
+Usage:
+...............
+ apply [<method>] [<config>]
+
+ method :: replace | update
+...............
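+For instance, to merge a configuration named +web+ (a
+hypothetical name) into the current CIB instead of replacing it:
+...............
+ apply update web
+...............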
+
+[[cmdhelp_template_show,show the processed configuration]]
+==== `show`
+
+Process the current or given configuration and display the result.
+
+Usage:
+...............
+ show [<config>]
+...............
+
+[[cmdhelp_cibstatus,CIB status management and editing]]
+=== `cibstatus`
+
+The `status` section of the CIB keeps the current status of nodes
+and resources. It is modified _only_ on events, i.e. when some
+resource operation is run or the node status changes. For obvious
+reasons, the CRM provides no user interface for modifying the
+status section. From the user's point of view, the status section
+is essentially a read-only part of the CIB. The
+current status is never even written to disk, though it is
+available in the PE (Policy Engine) input files which represent
+the history of cluster motions. The current status may be read
+using the `cibadmin -Q` command.
+
+It may sometimes be of interest to see how status changes would
+affect the Policy Engine. The set of `cibstatus` level commands
+allow the user to load status sections from various sources and
+then insert or modify resource operations or change nodes' state.
+
+The effect of those changes may then be observed by running the
+<<cmdhelp_configure_ptest,`ptest`>> command at the `configure` level,
+or the `simulate` and `run` commands at this level. `ptest` runs
+with the user-edited CIB, whereas the latter two commands run
+with the CIB which was loaded along with the status section.
+
+The `simulate` and `run` commands as well as all status
+modification commands are implemented using `crm_simulate(8)`.
+
+[[cmdhelp_cibstatus_load,load the CIB status section]]
+==== `load`
+
+Load a status section from a file, a shadow CIB, or the running
+cluster. By default, the current (`live`) status section is
+modified. Note that if the `live` status section is modified, it
+is not going to be updated when the cluster status changes, because
+that would overwrite the user changes. To make `crm` drop changes
+and resume use of the running cluster status, run `load live`.
+
+All CIB shadow configurations contain the status section which is
+a snapshot of the status section taken at the time the shadow was
+created. Obviously, this status section doesn't have much to do
+with the running cluster status, unless the shadow CIB has just
+been created. Therefore, the `ptest` command by default uses the
+running cluster status section.
+
+Usage:
+...............
+ load {<file>|shadow:<cib>|live}
+...............
+Example:
+...............
+ load bug-12299.xml
+ load shadow:test1
+...............
+
+[[cmdhelp_cibstatus_save,save the CIB status section]]
+==== `save`
+
+The current internal status section with whatever modifications
+were performed can be saved to a file or shadow CIB.
+
+If the file exists and contains a complete CIB, only the status
+section is going to be replaced and the rest of the CIB will
+remain intact. Otherwise, the current user-edited configuration
+is saved along with the status section.
+
+Note that all modifications are saved in the source file as soon
+as they are run.
+
+Usage:
+...............
+ save [<file>|shadow:<cib>]
+...............
+Example:
+...............
+ save bug-12299.xml
+...............
+
+[[cmdhelp_cibstatus_origin,display origin of the CIB status section]]
+==== `origin`
+
+Show the origin of the status section currently in use. This
+essentially shows the latest `load` argument.
+
+Usage:
+...............
+ origin
+...............
+
+[[cmdhelp_cibstatus_show,show CIB status section]]
+==== `show`
+
+Show the current status section in the XML format. Brace yourself
+for some unreadable output. Add the `changed` option to get a
+human-readable output of all changes.
+
+Usage:
+...............
+ show [changed]
+...............
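+For instance, to get a human-readable list of the modifications
+made so far:
+...............
+ show changed
+...............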
+
+[[cmdhelp_cibstatus_node,change node status]]
+==== `node`
+
+Change the node status. It is possible to throw a node out of
+the cluster, make it a member, or set its state to unclean.
+
+`online`:: Set the `node_state` `crmd` attribute to `online`
+and the `expected` and `join` attributes to `member`. The effect
+is that the node becomes a cluster member.
+
+`offline`:: Set the `node_state` `crmd` attribute to `offline`
+and the `expected` attribute to empty. The node is then cleanly
+removed from the cluster.
+
+`unclean`:: Set the `node_state` `crmd` attribute to `offline`
+and the `expected` attribute to `member`. In this case the node
+has unexpectedly disappeared.
+
+Usage:
+...............
+ node <node> {online|offline|unclean}
+...............
+Example:
+...............
+ node xen-b unclean
+...............
+
+[[cmdhelp_cibstatus_op,edit outcome of a resource operation]]
+==== `op`
+
+Edit the outcome of a resource operation. This way you can tell
+the CRM that it ran an operation and that the resource agent
+returned a certain exit code. It is also possible to change the
+operation's status. In case the operation status is set to
+something other than `done`, the exit code is effectively
+ignored.
+
+Usage:
+...............
+ op <operation> <resource> <exit_code> [<op_status>] [<node>]
+
+ operation :: probe | monitor[:<n>] | start | stop |
+ promote | demote | notify | migrate_to | migrate_from
+ exit_code :: <rc> | success | generic | args |
+ unimplemented | perm | installed | configured | not_running |
+ master | failed_master
+ op_status :: pending | done | cancelled | timeout | notsupported | error
+
+ n :: the monitor interval in seconds; if omitted, the first
+ recurring operation is referenced
+ rc :: numeric exit code in range 0..9
+...............
+Example:
+...............
+ op start d1 xen-b generic
+ op start d1 xen-b 1
+ op monitor d1 xen-b not_running
+ op stop d1 xen-b 0 timeout
+...............
+
+[[cmdhelp_cibstatus_quorum,set the quorum]]
+==== `quorum`
+
+Set the quorum value.
+
+Usage:
+...............
+ quorum <bool>
+...............
+Example:
+...............
+ quorum false
+...............
+
+[[cmdhelp_cibstatus_ticket,manage tickets]]
+==== `ticket`
+
+Modify the ticket status. Tickets can be granted and revoked.
+Granted tickets can be activated or put in standby.
+
+Usage:
+...............
+ ticket <ticket> {grant|revoke|activate|standby}
+...............
+Example:
+...............
+ ticket ticketA grant
+...............
+
+[[cmdhelp_cibstatus_run,run policy engine]]
+==== `run`
+
+Run the policy engine with the edited status section.
+
+Add a string of `v` characters to increase verbosity. Specify
+`scores` to also see allocation scores. `utilization` turns on
+information about the remaining capacity of nodes.
+
+If you have graphviz installed and an X11 session, `dotty(1)` is
+run to display the changes graphically.
+
+Usage:
+...............
+ run [nograph] [v...] [scores] [utilization]
+...............
+Example:
+...............
+ run
+...............
+
+[[cmdhelp_cibstatus_simulate,simulate cluster transition]]
+==== `simulate`
+
+Run the policy engine with the edited status section and simulate
+the transition.
+
+Add a string of `v` characters to increase verbosity. Specify
+`scores` to also see allocation scores. `utilization` turns on
+information about the remaining capacity of nodes.
+
+If you have graphviz installed and an X11 session, `dotty(1)` is
+run to display the changes graphically.
+
+Usage:
+...............
+ simulate [nograph] [v...] [scores] [utilization]
+...............
+Example:
+...............
+ simulate
+...............
+
+[[cmdhelp_history,cluster history]]
+=== `history`
+
+Examining Pacemaker's history is a particularly involved task.
+The number of subsystems to be considered, the complexity of the
+configuration, and the various information sources, most of which
+are not exactly human readable, make analyzing resource or node
+problems accessible only to the most knowledgeable. Or, depending
+on the point of view, to the most persistent. The following set
+of commands has been devised in the hope of making cluster
+history more accessible.
+
+Of course, looking at _all_ history could be time consuming
+regardless of how good the tools at hand are. Therefore, one
+should first specify the period to analyze. If not otherwise
+specified, the last hour is considered. Logs and other relevant
+information are collected using `hb_report`. Since this process
+takes some time and we always need fresh logs, information is
+refreshed in a much faster way using `pssh(1)`. If `python-pssh`
+is not found on the system, examining a live cluster is still
+possible, though not as comfortable.
+
+Apart from examining a live cluster, events may be retrieved from
+a report generated by `hb_report` (see also the `-H` option). In
+that case we assume that the period covering the whole report
+needs to be investigated. Of course, it is still possible to
+further reduce the time range.
+
+If you think you may have found a bug or just need clarification
+from developers or your support, the `session pack` command can
+help create a report. This is an example:
+...............
+ crm(live)history# limit "Jul 18 12:00" "Jul 18 12:30"
+ crm(live)history# session save strange_restart
+ crm(live)history# session pack
+ Report saved in .../strange_restart.tar.bz2
+ crm(live)history#
+...............
+In order to reduce the report size and allow developers to
+concentrate on the issue, you should limit the time frame
+beforehand. Giving a meaningful session name helps too.
+
+[[cmdhelp_history_info,cluster information summary]]
+==== `info`
+
+The `info` command shows the most important information about the
+cluster.
+
+Usage:
+...............
+ info
+...............
+Example:
+...............
+ info
+...............
+
+[[cmdhelp_history_latest,show latest news from the cluster]]
+==== `latest`
+
+The `latest` command shows a bit of recent history, more
+precisely whatever happened since the last cluster change (the
+latest transition). If the transition is running, the shell will
+first wait until it finishes.
+
+Usage:
+...............
+ latest
+...............
+Example:
+...............
+ latest
+...............
+
+[[cmdhelp_history_limit,limit timeframe to be examined]]
+==== `limit` (`timeframe`)
+
+All history commands look at events within a certain period. It
+defaults to the last hour for the live cluster source. There is
+no limit for the `hb_report` source. Use this command to set the
+timeframe.
+
+The time period is parsed by the dateutil python module. It
+covers a wide range of date formats. For instance:
+
+- 3:00 (today at 3am)
+- 15:00 (today at 3pm)
+- 2010/9/1 2pm (September 1st 2010 at 2pm)
+
+We won't bother to give a definition of the time specification in
+the usage below. Either use common sense or read the
+http://labix.org/python-dateutil[dateutil] documentation.
+
+If dateutil is not available, the time is parsed using strptime,
+and only the format printed by `date(1)` is allowed:
+
+- Tue Sep 15 20:46:27 CEST 2010
+
+Usage:
+...............
+ limit [<from_time> [<to_time>]]
+...............
+Examples:
+...............
+ limit 10:15
+ limit 15h22m 16h
+ limit "Sun 5 20:46" "Sun 5 22:00"
+...............
+
+[[cmdhelp_history_source,set source to be examined]]
+==== `source`
+
+Events to be examined can come from the current cluster or from a
+`hb_report` report. This command sets the source. `source live`
+sets source to the running cluster and system logs. If no source
+is specified, the current source information is printed.
+
+In case a report source is specified as a file reference, the
+file is going to be unpacked in the directory where it resides.
+This directory is not removed on exit.
+
+Usage:
+...............
+ source [<dir>|<file>|live]
+...............
+Examples:
+...............
+ source live
+ source /tmp/customer_case_22.tar.bz2
+ source /tmp/customer_case_22
+ source
+...............
+
+[[cmdhelp_history_refresh,refresh live report]]
+==== `refresh`
+
+This command makes sense only for the `live` source and makes
+`crm` collect the latest logs and other relevant information from
+the cluster nodes. If you want to make a completely new report,
+specify `force`.
+
+Usage:
+...............
+ refresh [force]
+...............
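+For instance, to discard the cached information and collect a
+completely new report:
+...............
+ refresh force
+...............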
+
+[[cmdhelp_history_detail,set the level of detail shown]]
+==== `detail`
+
+How much detail to show from the logs.
+
+Usage:
+...............
+ detail <detail_level>
+
+ detail_level :: small integer (defaults to 0)
+...............
+Example:
+...............
+ detail 1
+...............
+
+[[cmdhelp_history_setnodes,set the list of cluster nodes]]
+==== `setnodes`
+
+In case the host this program runs on is not part of the cluster,
+it is necessary to set the list of nodes.
+
+Usage:
+...............
+ setnodes <node> [<node> ...]
+...............
+Example:
+...............
+ setnodes node_a node_b
+...............
+
+[[cmdhelp_history_resource,resource events]]
+==== `resource`
+
+Show actions and any failures that happened on all specified
+resources on all nodes. Normally, one gives resource names as
+arguments, but it is also possible to use extended regular
+expressions. Note that group, clone, and master/slave names are
+never logged; the resource command expands all of these
+appropriately, so that clone instances or resources which are
+part of a group are shown.
+
+Usage:
+...............
+ resource <rsc> [<rsc> ...]
+...............
+Example:
+...............
+ resource bigdb public_ip
+ resource my_.*_db2
+ resource ping_clone
+...............
+
+[[cmdhelp_history_node,node events]]
+==== `node`
+
+Show important events that happened on a node. Important events
+are node lost and join, standby and online, and fence. Use either
+node names or extended regular expressions.
+
+Usage:
+...............
+ node <node> [<node> ...]
+...............
+Example:
+...............
+ node node1
+...............
+
+[[cmdhelp_history_log,log content]]
+==== `log`
+
+Show messages logged on one or more nodes. Leaving out a node
+name produces combined logs of all nodes. Messages are sorted by
+time and, if the terminal emulation supports it, displayed in
+different colours depending on the node to allow for easier
+reading.
+
+The sorting key is the timestamp as written by syslog, which
+normally has a maximum resolution of one second. Obviously,
+messages generated by events which share the same timestamp may
+not be sorted in the order in which they actually happened. Such
+close events may occur fairly often.
+
+Usage:
+...............
+ log [<node>]
+...............
+Example:
+...............
+ log node-a
+...............
+
+[[cmdhelp_history_exclude,exclude log messages]]
+==== `exclude`
+
+If a log is infested with irrelevant messages, those messages may
+be excluded by specifying a regular expression. The regular
+expressions used are Python extended. This command is additive.
+To drop all regular expressions, use `exclude clear`. Run
+`exclude` only to see the current list of regular expressions.
+Excludes are saved along with the history sessions.
+
+Usage:
+...............
+ exclude [<regex>|clear]
+...............
+Example:
+...............
+ exclude kernel.*ocfs2
+...............
+
+[[cmdhelp_history_peinputs,list or get PE input files]]
+==== `peinputs`
+
+Every event in the cluster results in generating one or more
+Policy Engine (PE) files. These files describe future motions of
+resources. The files are listed as full paths in the current
+report directory. Add `v` to also see the creation time stamps.
+
+Usage:
+...............
+ peinputs [{<range>|<number>} ...] [v]
+
+ range :: <n1>:<n2>
+...............
+Example:
+...............
+ peinputs
+ peinputs 440:444 446
+ peinputs v
+...............
+
+[[cmdhelp_history_transition,show transition]]
+==== `transition`
+
+This command will print actions planned by the PE and run
+graphviz (`dotty`) to display a graphical representation of the
+transition. Of course, for the latter an X11 session is required.
+This command invokes `ptest(8)` in the background.
+
+The `showdot` subcommand runs graphviz (`dotty`) to display a
+graphical representation of the `.dot` file which has been
+included in the report. Essentially, it shows the calculation
+produced by `pengine` which is installed on the node where the
+report was produced. In the optimal case, this output should not
+differ from the one produced by the locally installed `pengine`.
+
+The `log` subcommand shows the full log for the duration of the
+transition.
+
+A transition can also be saved to a CIB shadow for further
+analysis or use with `cib` or `configure` commands (use the
+`save` subcommand). The shadow file name defaults to the name of
+the PE input file.
+
+If the PE input file number is not provided, it defaults to the
+last one, i.e. the last transition. The last transition can also
+be referenced with number 0. If the number is negative, then the
+corresponding transition relative to the last one is chosen.
+
+If there are warning and error PE input files, or if different
+nodes were the DC in the observed timeframe, PE input file
+numbers may collide. In that case, provide some unique part of
+the path to the file.
+
+After the `ptest` output, logs about events that happened during
+the transition are printed.
+
+Usage:
+...............
+ transition [<number>|<index>|<file>] [nograph] [v...] [scores] [actions] [utilization]
+ transition showdot [<number>|<index>|<file>]
+ transition log [<number>|<index>|<file>]
+ transition save [<number>|<index>|<file> [name]]
+...............
+Examples:
+...............
+ transition
+ transition 444
+ transition -1
+ transition pe-error-3.bz2
+ transition node-a/pengine/pe-input-2.bz2
+ transition showdot 444
+ transition log
+ transition save 0 enigma-22
+...............
+
+[[cmdhelp_history_show,show status or configuration of the PE input file]]
+==== `show`
+
+Every transition is saved as a PE file. Use this command to
+render that PE file either as configuration or status. The
+configuration output is the same as `crm configure show`.
+
+Usage:
+...............
+ show <pe> [status]
+
+ pe :: <number>|<index>|<file>|live
+...............
+Examples:
+...............
+ show 2066
+ show pe-input-2080.bz2 status
+...............
+
+[[cmdhelp_history_graph,generate a directed graph from the PE file]]
+==== `graph`
+
+Create a graphviz graphical layout from the PE file (the
+transition). Every transition contains the cluster configuration
+which was active at the time. See also <<cmdhelp_configure_graph,generate a directed graph
+from configuration>>.
+
+Usage:
+...............
+ graph <pe> [<gtype> [<file> [<img_format>]]]
+
+ gtype :: dot
+ img_format :: `dot` output format (see the `-T` option)
+...............
+Example:
+...............
+ graph -1
+ graph 322 dot clu1.conf.dot
+ graph 322 dot clu1.conf.svg svg
+...............
+
+[[cmdhelp_history_diff,cluster states/transitions difference]]
+==== `diff`
+
+A transition represents a change in cluster configuration or
+state. Use `diff` to see what has changed between two
+transitions.
+
+If you want to specify the current cluster configuration and
+status, use the string `live`.
+
+Normally, the first transition specified should be the older
+one, but this is not enforced.
+
+Note that a single configuration update may result in more than
+one transition.
+
+Usage:
+...............
+ diff <pe> <pe> [status] [html]
+
+ pe :: <number>|<index>|<file>|live
+...............
+Examples:
+...............
+ diff 2066 2067
+ diff pe-input-2080.bz2 live status
+...............
+
+[[cmdhelp_history_session,manage history sessions]]
+==== `session`
+
+Sometimes you may want to get back to examining a particular
+history period or bug report. In order to make that easier, the
+current settings can be saved and later retrieved.
+
+If the current history being examined comes from a live cluster,
+the logs, PE inputs, and other files are saved too, because they
+may later disappear from the nodes. For existing reports coming
+from `hb_report`, only the directory location is saved (so as not
+to waste space).
+
+A history session may also be packed into a tarball which can
+then be sent to support.
+
+Leave out the subcommand to see the current session.
+
+Usage:
+...............
+ session [{save|load|delete} <name> | pack [<name>] | update | list]
+...............
+Examples:
+...............
+ session save bnc966622
+ session load rsclost-2
+ session list
+...............
+
+=== `end` (`cd`, `up`)
+
+The `end` command ends the current level and the user moves to
+the parent level. This command is available everywhere.
+
+Usage:
+...............
+ end
+...............
+
+=== `help`
+
+The `help` command prints help for the current level or for the
+specified topic (command). This command is available everywhere.
+
+Usage:
+...............
+ help [<topic>]
+...............
+
+=== `quit` (`exit`, `bye`)
+
+Leave the program.
+
+BUGS
+----
+Even though all sensible configurations (and most of those that
+are not) are going to be supported by the crm shell, I suspect
+that certain XML constructs may still confuse the tool. When that
+happens, please file a bug report.
+
+The crm shell will not try to update the objects it does not
+understand. Of course, it is always possible to edit such objects
+in the XML format.
+
+AUTHOR
+------
+Dejan Muhamedagic, <dejan@suse.de>
+and many OTHERS
+
+SEE ALSO
+--------
+crm_resource(8), crm_attribute(8), crm_mon(8), cib_shadow(8),
+ptest(8), dotty(1), crm_simulate(8), cibadmin(8)
+
+
+COPYING
+-------
+Copyright \(C) 2008-2011 Dejan Muhamedagic. Free use of this
+software is granted under the terms of the GNU General Public License (GPL).
+
+//////////////////////
+ vim:ts=4:sw=4:expandtab:
+//////////////////////
diff --git a/doc/website-v1/man-2.0.adoc b/doc/website-v1/man-2.0.adoc
new file mode 100644
index 0000000..a2127d4
--- /dev/null
+++ b/doc/website-v1/man-2.0.adoc
@@ -0,0 +1,5048 @@
+:man source: crm
+:man version: 2.3.2
+:man manual: crmsh documentation
+
+crm(8)
+======
+
+NAME
+----
+crm - Pacemaker command line interface for configuration and management
+
+
+SYNOPSIS
+--------
+*crm* [OPTIONS] [SUBCOMMAND ARGS...]
+
+
+[[topics_Description,Program description]]
+DESCRIPTION
+-----------
+The `crm` shell is a command-line based cluster configuration and
+management tool. Its goal is to assist as much as possible with the
+configuration and maintenance of Pacemaker-based High Availability
+clusters.
+
+For more information on Pacemaker itself, see http://clusterlabs.org/.
+
+`crm` works both as a command-line tool to be called directly from the
+system shell, and as an interactive shell with extensive tab
+completion and help.
+
+The primary focus of the `crm` shell is to provide a simplified and
+consistent interface to Pacemaker, but it also provides tools for
+managing the creation and configuration of High Availability clusters
+from scratch. To learn more about this aspect of `crm`, see the
+`cluster` section below.
+
+The `crm` shell can be used to manage every aspect of configuring and
+maintaining a cluster. It provides a simplified line-based syntax on
+top of the XML configuration format used by Pacemaker, commands for
+starting and stopping resources, tools for exploring the history of a
+cluster including log scraping and a set of cluster scripts useful for
+automating the setup and installation of services on the cluster
+nodes.
+
+The `crm` shell is line oriented: every command must start and finish
+on the same line. It is possible to use a continuation character (+\+)
+to write one command across two or more lines. The continuation character
+is commonly used when displaying configurations.
+
+[[topics_CommandLine,Command line options]]
+OPTIONS
+-------
+*-f, --file*='FILE'::
+ Load commands from the given file. If a dash +-+ is used in place
+ of a file name, `crm` will read commands from the shell standard
+ input (`stdin`).
+
+*-c, --cib*='CIB'::
+ Start the session using the given shadow CIB file.
+ Equivalent to +cib use <CIB>+.
+
+*-D, --display*='OUTPUT_TYPE'::
+ Choose one of the output options: +plain+, +color-always+, +color+,
+ or +uppercase+. The default is +color+ if the terminal emulation
+ supports colors. Otherwise, +plain+ is used.
+
+*-F, --force*::
+ Make `crm` proceed with applying changes where it would normally
+ ask the user to confirm before proceeding. This option is mainly
+ useful in scripts, and should be used with care.
+
+*-w, --wait*::
+ Make `crm` wait for the cluster transition to finish (for the
+ changes to take effect) after each processed line.
+
+*-H, --history*='DIR|FILE|SESSION'::
+ A directory or file containing a cluster report to load
+ into the `history` commands, or the name of a previously
+ saved history session.
+
+*-h, --help*::
+ Print help page.
+
+*--version*::
+ Print crmsh version and build information (Mercurial changeset
+ hash).
+
+*-d, --debug*::
+ Print verbose debugging information.
+
+*-R, --regression-tests*::
+ Enables extra verbose trace logging used by the regression
+ tests. Logs all external calls made by crmsh.
+
+*--scriptdir*='DIR'::
+ Extra directory where crm looks for cluster scripts, or a list of
+ directories separated by semi-colons (e.g. +/dir1;/dir2;etc.+).
+
+*-o, --opt*='OPTION=VALUE'::
+ Set crmsh option temporarily. If the options are saved using
+ +options save+ then the value passed here will also be saved.
+ Multiple options can be set by using +-o+ multiple times.
+
+[[topics_Introduction,Introduction]]
+== Introduction
+
+This section of the user guide covers general topics about the user
+interface and describes some of the features of `crmsh` in detail.
+
+[[topics_Introduction_Interface,User interface]]
+=== User interface
+
+The main purpose of `crmsh` is to provide a simple yet powerful
+interface to the cluster stack. There are two main modes of operation
+with the user interface of `crmsh`:
+
+* Command line (single-shot) use - Use `crm` as a regular UNIX command
+ from your usual shell. `crm` has full bash completion built in, so
+ using it in this manner should be as comfortable and familiar as
+ using any other command-line tool.
+
+* Interactive mode - By calling `crm` without arguments, or by calling
+ it with only a sublevel as argument, `crm` enters the interactive
+ mode. In this mode, it acts as its own command shell, which
+ remembers which sublevel you are currently in and allows for rapid
+ and convenient execution of multiple commands within the same
+ sublevel. This mode also has full tab completion, as well as
+ built-in interactive help and syntax highlighting.
+
+Here are a few examples of using `crm` both as a command-line tool and
+as an interactive shell:
+
+.Command line (one-shot) use:
+........
+# crm resource stop www_app
+........
+
+.Interactive use:
+........
+# crm
+crm(live)# resource
+crm(live)resource# unmanage tetris_1
+crm(live)resource# up
+crm(live)# node standby node4
+........
+
+.Cluster configuration:
+........
+# crm configure<<EOF
+ #
+ # resources
+ #
+ primitive disk0 iscsi \
+ params portal=192.168.2.108:3260 target=iqn.2008-07.com.suse:disk0
+ primitive fs0 Filesystem \
+ params device=/dev/disk/by-label/disk0 directory=/disk0 fstype=ext3
+ primitive internal_ip IPaddr params ip=192.168.1.101
+ primitive apache apache \
+ params configfile=/disk0/etc/apache2/site0.conf
+ primitive apcfence stonith:apcsmart \
+ params ttydev=/dev/ttyS0 hostlist="node1 node2" \
+ op start timeout=60s
+ primitive pingd pingd \
+ params name=pingd dampen=5s multiplier=100 host_list="r1 r2"
+ #
+ # monitor apache and the UPS
+ #
+ monitor apache 60s:30s
+ monitor apcfence 120m:60s
+ #
+ # cluster layout
+ #
+ group internal_www \
+ disk0 fs0 internal_ip apache
+ clone fence apcfence \
+ meta globally-unique=false clone-max=2 clone-node-max=1
+ clone conn pingd \
+ meta globally-unique=false clone-max=2 clone-node-max=1
+ location node_pref internal_www \
+ rule 50: #uname eq node1 \
+ rule pingd: defined pingd
+ #
+ # cluster properties
+ #
+ property stonith-enabled=true
+ commit
+EOF
+........
+
+The `crm` interface is hierarchical, with commands organized into
+separate levels by functionality. To list the available levels and
+commands, either execute +help <level>+ or, at the top level of
+the shell, simply type `help` to get an overview of all
+available levels and commands.
+
+The +(live)+ string in the `crm` prompt signifies that the current CIB
+in use is the cluster live configuration. It is also possible to
+work with so-called <<topics_Features_Shadows,shadow CIBs>>. These are separate, inactive
+configurations stored in files that can be applied and thereby
+replace the live configuration at any time.
+
+[[topics_Introduction_Completion,Tab completion]]
+=== Tab completion
+
+The `crm` shell makes extensive use of tab completion. The completion
+is both static (i.e. for `crm` commands) and dynamic. The latter
+takes into account the current status of the cluster or
+information from installed resource agents. Sometimes, completion
+may also be used to get short help on resource parameters. Here
+are a few examples:
+
+...............
+crm(live)resource# <TAB><TAB>
+bye failcount move restart unmigrate
+cd help param show unmove
+cleanup list promote start up
+demote manage quit status utilization
+end meta refresh stop
+exit migrate reprobe unmanage
+
+crm(live)configure# primitive fence-1 <TAB><TAB>
+heartbeat: lsb: ocf: stonith:
+
+crm(live)configure# primitive fence-1 stonith:<TAB><TAB>
+apcmaster external/ippower9258 fence_legacy
+apcmastersnmp external/kdumpcheck ibmhmc
+apcsmart external/libvirt ipmilan
+
+crm(live)configure# primitive fence-1 stonith:ipmilan params <TAB><TAB>
+auth= hostname= ipaddr= login= password= port= priv=
+
+crm(live)configure# primitive fence-1 stonith:ipmilan params auth=<TAB><TAB>
+auth* (string)
+ The authorization type of the IPMI session ("none", "straight", "md2", or "md5")
+...............
+
+`crmsh` also comes with bash completion usable directly from the
+system shell. This should be installed automatically with the command
+itself.
+
+[[topics_Introduction_Shorthand,Shorthand syntax]]
+=== Shorthand syntax
+
+When using the `crm` shell to manage clusters, you will end up typing
+a lot of commands many times over. Clear command names like
++configure+ help in understanding and learning to use the cluster
+shell, but are easy to misspell and tedious to type repeatedly. The
+interactive mode and tab completion both help with this, but the `crm`
+shell also has the ability to understand a variety of shorthand
+aliases for all of the commands.
+
+For example, instead of typing `crm status`, you can type `crm st` or
+`crm stat`. Instead of `crm configure` you can type `crm cfg` or even
+`crm cf`. `crm resource` can be shortened to `crm rsc`, and so on.
+
+The exact list of accepted aliases is too long to print in full, but
+experimentation and typos should help in discovering more of them.
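+
+For instance, the following one-shot invocations are equivalent
+to `crm status` and `crm configure show`, using the aliases
+mentioned above:
+........
+# crm st
+# crm cfg show
+........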
+
+[[topics_Features,Features]]
+== Features
+
+The feature set of crmsh covers a wide range of functionality, and
+understanding how and when to use the various features of the shell
+can be difficult. This section of the guide describes some of the
+features and use cases of `crmsh` in more depth. The intention is to
+provide a deeper understanding of these features, but also to serve as
+a guide to using them.
+
+[[topics_Features_Shadows,Shadow CIB usage]]
+=== Shadow CIB usage
+
+Shadow CIBs are normal cluster configurations stored in files.
+They may be manipulated in much the same way as the _live_ CIB, with
+the key difference that changes to a shadow CIB have no effect on the
+actual cluster resources. An administrator may choose to apply any of
+them to the cluster, thus replacing the running configuration with the
+one found in the shadow CIB.
+
+The `crm` prompt always contains the name of the configuration which
+is currently in use, or the string _live_ if using the live cluster
+configuration.
+
+When editing the configuration in the `configure` level, no changes
+are actually applied until the `commit` command is executed. It is
+possible to start editing a configuration as usual, but instead of
+committing the changes to the active CIB, save them to a shadow CIB.
+
+The following example `configure` session demonstrates how this can be
+done:
+...............
+crm(live)configure# cib new test-2
+INFO: test-2 shadow CIB created
+crm(test-2)configure# commit
+...............
+
+[[topics_Features_Checks,Configuration semantic checks]]
+=== Configuration semantic checks
+
+Resource definitions may be checked against the meta-data
+provided with the resource agents. These checks are currently
+carried out:
+
+- whether required parameters are set
+- whether defined parameters exist in the meta-data
+- whether operation timeout values are long enough
+
+The parameter checks are obvious and need no further explanation.
+Failures in these checks are treated as configuration errors.
+
+The timeouts for operations should be at least as long as those
+recommended in the meta-data. Too short timeout values are a
+common mistake in cluster configurations and, even worse, they
+often slip through if cluster testing was not thorough. Though
+operation timeout issues are treated as warnings, make sure that
+the timeouts are usable in your environment. Note also that the
+values given are just an _advisory minimum_; your resources may
+require longer timeouts.
+
+The user may tune the frequency of checks and the treatment of
+errors using the <<cmdhelp_options_check-frequency,`check-frequency`>>
+and <<cmdhelp_options_check-mode,`check-mode`>> preferences.
+
+Note that if the +check-frequency+ is set to +always+ and the
++check-mode+ to +strict+, errors are not tolerated and such
+configuration cannot be saved.
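+
+For instance, to make the checks as strict as possible, set both
+preferences at the `options` level:
+...............
+crm(live)options# check-frequency always
+crm(live)options# check-mode strict
+...............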
+
+[[topics_Features_Templates,Configuration templates]]
+=== Configuration templates
+
+.Deprecation note
+****************************
+Configuration templates have been deprecated in favor of the more
+capable `cluster scripts`. To learn how to use cluster scripts, see
+the dedicated documentation on the `crmsh` website at
+http://crmsh.github.io/, or in the <<cmdhelp_script,Script section>>.
+****************************
+
+Configuration templates are ready-made configurations created by
+cluster experts. They are designed so that users may generate
+valid cluster configurations with minimum effort.
+If you are new to Pacemaker, templates may be the best way to
+start.
+
+We will show here how to create a simple yet functional Apache
+configuration:
+...............
+# crm configure
+crm(live)configure# template
+crm(live)configure template# list templates
+apache filesystem virtual-ip
+crm(live)configure template# new web <TAB><TAB>
+apache filesystem virtual-ip
+crm(live)configure template# new web apache
+INFO: pulling in template apache
+INFO: pulling in template virtual-ip
+crm(live)configure template# list
+web2-d web2 vip2 web3 vip web
+...............
+
+We enter the `template` level from `configure`. Use the `list`
+command to show templates available on the system. The `new`
+command creates a configuration from the +apache+ template. You
+can use tab completion to pick templates. Note that the apache
+template depends on a virtual IP address which is automatically
+pulled along. The `list` command shows the just created +web+
+configuration, among other configurations (I hope that you,
+unlike me, will use more sensible and descriptive names).
+
+The `show` command, which displays the resulting configuration,
+may be used to get an idea about the minimum required changes
+which have to be done. All +ERROR+ messages show the line numbers
+in which the respective parameters are to be defined:
+...............
+crm(live)configure template# show
+ERROR: 23: required parameter ip not set
+ERROR: 61: required parameter id not set
+ERROR: 65: required parameter configfile not set
+crm(live)configure template# edit
+...............
+
+The `edit` command invokes the preferred text editor with the
++web+ configuration. At the top of the file, the user is advised
+how to make changes. A good template should require the user to
+specify only parameters. For example, the +web+ configuration
+we created above has the following required and optional
+parameters (all parameter lines start with +%%+):
+...............
+$ grep -n ^%% ~/.crmconf/web
+23:%% ip
+31:%% netmask
+35:%% lvs_support
+61:%% id
+65:%% configfile
+71:%% options
+76:%% envfiles
+...............
+
+These lines are the only ones that should be modified. Simply
+append the parameter value at the end of the line. For instance,
+after editing this template, the result could look like this (we
+used tabs instead of spaces to make the values stand out):
+...............
+$ grep -n ^%% ~/.crmconf/web
+23:%% ip 192.168.1.101
+31:%% netmask
+35:%% lvs_support
+61:%% id websvc
+65:%% configfile /etc/apache2/httpd.conf
+71:%% options
+76:%% envfiles
+...............
+
+As you can see, the parameter line format is very simple:
+...............
+%% <name> <value>
+...............
+
+After editing the file, use `show` again to display the
+configuration:
+...............
+crm(live)configure template# show
+primitive virtual-ip IPaddr \
+ params ip=192.168.1.101
+primitive apache apache \
+ params configfile="/etc/apache2/httpd.conf"
+monitor apache 120s:60s
+group websvc \
+ apache virtual-ip
+...............
+
+The target resource of the apache template is a group which we
+named +websvc+ in this sample session.
+
+This configuration looks exactly as you could type it at the
+`configure` level. The point of templates is to save you some
+typing. It is important, however, to understand the configuration
+produced.
+
+Finally, the configuration may be applied to the current
+crm configuration (note how the configuration changed slightly,
+though it is still equivalent, after being digested at the
+`configure` level):
+...............
+crm(live)configure template# apply
+crm(live)configure template# cd ..
+crm(live)configure# show
+node xen-b
+node xen-c
+primitive apache apache \
+ params configfile="/etc/apache2/httpd.conf" \
+ op monitor interval=120s timeout=60s
+primitive virtual-ip IPaddr \
+ params ip=192.168.1.101
+group websvc apache virtual-ip
+...............
+
+Note that this still does not commit the configuration to the CIB
+which is used in the shell, either the running one (+live+) or
+some shadow CIB. For that you still need to execute the `commit`
+command.
+
+To complete our example, we should also define the preferred node
+to run the service:
+
+...............
+crm(live)configure# location websvc-pref websvc 100: xen-b
+...............
+
+If you are not happy with some resource names which are provided
+by default, you can rename them now:
+
+...............
+crm(live)configure# rename virtual-ip intranet-ip
+crm(live)configure# show
+node xen-b
+node xen-c
+primitive apache apache \
+ params configfile="/etc/apache2/httpd.conf" \
+ op monitor interval=120s timeout=60s
+primitive intranet-ip IPaddr \
+ params ip=192.168.1.101
+group websvc apache intranet-ip
+location websvc-pref websvc 100: xen-b
+...............
+
+To summarize, working with templates typically consists of the
+following steps:
+
+- `new`: create a new configuration from templates
+- `edit`: define parameters, at least the required ones
+- `show`: see if the configuration is valid
+- `apply`: apply the configuration to the `configure` level
+
+[[topics_Features_Testing,Resource testing]]
+=== Resource testing
+
+The amount of detail in a cluster makes all configurations prone
+to errors. By far the largest number of issues in a cluster is
+due to bad resource configuration. The shell can help quickly
+diagnose such problems. And considerably reduce your keyboard
+wear.
+
+Let's say that we entered the following configuration:
+...............
+node xen-b
+node xen-c
+node xen-d
+primitive fencer stonith:external/libvirt \
+ params hypervisor_uri="qemu+tcp://10.2.13.1/system" \
+ hostlist="xen-b xen-c xen-d" \
+ op monitor interval=2h
+primitive svc Xinetd \
+ params service=systat \
+ op monitor interval=30s
+primitive intranet-ip IPaddr2 \
+ params ip=10.2.13.100 \
+ op monitor interval=30s
+primitive apache apache \
+ params configfile="/etc/apache2/httpd.conf" \
+ op monitor interval=120s timeout=60s
+group websvc apache intranet-ip
+location websvc-pref websvc 100: xen-b
+...............
+
+Before typing `commit` to submit the configuration to the cib we
+can make sure that all resources are usable on all nodes:
+...............
+crm(live)configure# rsctest websvc svc fencer
+...............
+
+It is important that resources being tested are not running on
+any nodes. Otherwise, the `rsctest` command will refuse to do
+anything. Of course, if the current configuration resides in a
+CIB shadow, then a `commit` is irrelevant. The point is that the
+resources must not be running on any node.
+
+.Note on stopping all resources
+****************************
+As an alternative to not committing a configuration, it is also
+possible to tell Pacemaker not to start any resources:
+
+...............
+crm(live)configure# property stop-all-resources=yes
+...............
+Well, almost none: resources of class stonith are still started,
+but the shell is not as strict when it comes to stonith resources.
+****************************
+
+Order of resources is significant insofar as a resource depends
+on all resources to its left. In most configurations, it's
+probably practical to test resources in several runs, based on
+their dependencies.
+
+Apart from groups, `crm` does not interpret constraints and
+therefore knows nothing about resource dependencies. It also
+doesn't know if a resource can run on a node at all in case of an
+asymmetric cluster. It is up to the user to specify a list of
+eligible nodes if a resource is not meant to run on every node.
+
+[[topics_Features_Security,Access Control Lists (ACL)]]
+=== Access Control Lists (ACL)
+
+.Note on ACLs in Pacemaker 1.1.12
+****************************
+The support for ACLs has been revised in Pacemaker version 1.1.12 and
+up. Depending on which version you are using, the information in this
+section may no longer be accurate. Look for the `acl_target`
+configuration element for more details on the new syntax.
+****************************
+
+By default, the users from the +haclient+ group have full access
+to the cluster (or, more precisely, to the CIB). Access control
+lists allow for finer access control to the cluster.
+
+Access control lists consist of an ordered set of access rules.
+Each rule allows read or write access or denies access
+completely. Rules are typically combined to produce a specific
+role. Then, users may be assigned a role.
+
+For instance, this is a role which defines a set of rules
+allowing management of a single resource:
+
+...............
+role bigdb_admin \
+ write meta:bigdb:target-role \
+ write meta:bigdb:is-managed \
+ write location:bigdb \
+ read ref:bigdb
+...............
+
+The first two rules allow modifying the +target-role+ and
++is-managed+ meta attributes which effectively enables users in
+this role to stop/start and manage/unmanage the resource. The
+constraints write access rule allows moving the resource around.
+Finally, the user is granted read access to the resource
+definition.
+
+For proper operation of all Pacemaker programs, it is advisable
+to add the following role to all users:
+
+...............
+role read_all \
+ read cib
+...............
+
+For finer grained read access try with the rules listed in the
+following role:
+
+...............
+role basic_read \
+ read node attribute:uname \
+ read node attribute:type \
+ read property \
+ read status
+...............
+
+It is however possible that some Pacemaker programs (e.g.
+`ptest`) may not function correctly if the whole CIB is not
+readable.
+
+Some of the ACL rules in the examples above are expanded by the
+shell to XPath specifications. For instance,
++meta:bigdb:target-role+ expands to:
+
+........
+//primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']
+........
+
+You can see the expansion by showing XML:
+
+...............
+crm(live) configure# show xml bigdb_admin
+...
+<acls>
+ <acl_role id="bigdb_admin">
+ <write id="bigdb_admin-write"
+ xpath="//primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']"/>
+...............
+
+Many different XPath expressions can have equal meaning. For
+instance, the following two are equal, but only the first one is
+going to be recognized as a shortcut:
+
+...............
+//primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']
+//resources/primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']
+...............
+
+XPath is a powerful language, but you should try to keep your ACL
+XPath expressions simple and use the built-in shortcuts whenever
+possible.
+
+[[topics_Features_Resourcesets,Syntax: Resource sets]]
+=== Syntax: Resource sets
+
+Using resource sets can be a bit confusing unless one knows the
+details of the implementation in Pacemaker as well as how to interpret
+the syntax provided by `crmsh`.
+
+Three different types of resource sets are provided by `crmsh`, and
+each one implies different values for the two resource set attributes,
++sequential+ and +require-all+.
+
++sequential+::
+ If false, the resources in the set do not depend on each other
+ internally. Setting +sequential+ to +true+ implies a strict order of
+ dependency within the set.
+
++require-all+::
+ If false, only one resource in the set is required to fulfil the
+ requirements of the set. The set of A, B and C with +require-all+
+ set to +false+ is read as "A OR B OR C" when its dependencies
+ are resolved.
+
+The three types of resource sets modify the attributes in the
+following way:
+
+1. Implicit sets (no brackets). +sequential=true+, +require-all=true+
+2. Parenthesis set (+(+ ... +)+). +sequential=false+, +require-all=true+
+3. Bracket set (+[+ ... +]+). +sequential=false+, +require-all=false+
+
+To create a set with the properties +sequential=true+ and
++require-all=false+, explicitly set +sequential+ in a bracketed set,
++[ A B C sequential=true ]+.
+
+To create multiple sets with both +sequential+ and +require-all+ set to
+true, explicitly set +sequential+ in a parenthesis set:
++A B ( C D sequential=true )+.
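+
+As a sketch, here are the three set types used in order
+constraints, with hypothetical resources +A+, +B+ and +C+:
+...............
+order o-implicit Mandatory: A B C
+order o-paren Mandatory: A ( B C )
+order o-bracket Mandatory: [ A B ] C
+...............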
+
+[[topics_Features_AttributeListReferences,Syntax: Attribute list references]]
+=== Syntax: Attribute list references
+
+Attribute lists are used to set attributes and parameters for
+resources, constraints and property definitions. For example, to set
+the virtual IP used by an +IPAddr2+ resource the attribute +ip+ can be
+set in an attribute list for that resource.
+
+Attribute lists can have identifiers that name them, and other
+resources can reuse the same attribute list by referring to that name
+using an +$id-ref+. For example, the following statement defines a
+simple dummy resource with an attribute list which sets the parameter
++state+ to the value 1 and sets the identifier for the attribute list
+to +on-state+:
+
+..............
+primitive dummy-1 Dummy params $id=on-state state=1
+..............
+
+To refer to this attribute list from a different resource, refer to
+the +on-state+ name using an id-ref:
+
+..............
+primitive dummy-2 Dummy params $id-ref=on-state
+..............
+
+The resource +dummy-2+ will now also have the parameter +state+ set to the value 1.
+
+[[topics_Features_AttributeReferences,Syntax: Attribute references]]
+=== Syntax: Attribute references
+
+In some cases, referencing complete attribute lists is too
+coarse-grained, for example if two different parameters with different
+names should have the same value set. Instead of having to copy the
+value in multiple places, it is possible to create references to
+individual attributes in attribute lists.
+
+To name an attribute in order to be able to refer to it later, prefix
+the attribute name with a +$+ character (as seen above with the
+special names +$id+ and +$id-ref+):
+
+............
+primitive dummy-1 Dummy params $state=1
+............
+
+The identifier +state+ can now be used to refer to this attribute from other
+primitives, using the +@<id>+ syntax:
+
+............
+primitive dummy-2 Dummy params @state
+............
+
+In some cases, using the attribute name as the identifier doesn't work
+due to name clashes. In this case, the syntax +$<id>:<name>=<value>+
+can be used to give the attribute a different identifier:
+
+............
+primitive dummy-1 params $dummy-state-on:state=1
+primitive dummy-2 params @dummy-state-on
+............
+
+There is also the possibility that two resources both use the same
+attribute value but with different names. For example, a web server
+may have a parameter +server_ip+ for setting the IP address where it
+listens for incoming requests, and a virtual IP resource may have a
+parameter called +ip+ which sets the IP address it creates. To
+configure these two resources with an IP without repeating the value,
+the reference can be given a name using the syntax +@<id>:<name>+.
+
+Example:
+............
+primitive virtual-ip IPaddr2 params $vip:ip=192.168.1.100
+primitive webserver apache params @vip:server_ip
+............
+
+[[topics_Syntax_RuleExpressions,Syntax: Rule expressions]]
+=== Syntax: Rule expressions
+
+Many of the configuration commands in `crmsh` now support the use of
+_rule expressions_, which can influence what attributes apply to a
+resource or under which conditions a constraint is applied, depending
+on changing conditions like date, time, the value of attributes and
+more.
+
+Here is an example of a simple rule expression used to apply a
+different resource parameter on the node named `node1`:
+
+..............
+primitive my_resource Special \
+ params 2: rule #uname eq node1 interface=eth1 \
+ params 1: interface=eth0
+..............
+
+This primitive resource has two lists of parameters with descending
+priority. The parameter list with the highest priority is applied
+first, but only if the rule expressions for that parameter list all
+apply. In this case, the rule `#uname eq node1` limits the parameter
+list so that it is only applied on `node1`.
+
+Note that rule expressions are not terminated and are immediately
+followed by the data to which the rule is applied. In this case, the
+name-value pair `interface=eth1`.
+
+Rule expressions can contain multiple expressions connected using
+the boolean operators `or` and `and`. The full syntax for rule
+expressions is listed below.
+
+..............
+rules ::
+ rule [id_spec] [$role=<role>] <score>: <expression>
+ [rule [id_spec] [$role=<role>] <score>: <expression> ...]
+
+id_spec :: $id=<id> | $id-ref=<id>
+score :: <number> | <attribute> | [-]inf
+expression :: <simple_exp> [<bool_op> <simple_exp> ...]
+bool_op :: or | and
+simple_exp :: <attribute> [type:]<binary_op> <value>
+ | <unary_op> <attribute>
+ | date <date_expr>
+type :: <string> | <version> | <number>
+binary_op :: lt | gt | lte | gte | eq | ne
+unary_op :: defined | not_defined
+
+date_expr :: lt <end>
+ | gt <start>
+ | in start=<start> end=<end>
+ | in start=<start> <duration>
+ | spec <date_spec>
+duration|date_spec ::
+ hours=<value>
+ | monthdays=<value>
+ | weekdays=<value>
+ | yeardays=<value>
+ | months=<value>
+ | weeks=<value>
+ | years=<value>
+ | weekyears=<value>
+ | moon=<value>
+..............
+
+[[topics_Reference,Command reference]]
+== Command reference
+
+The commands are structured to be compatible with the shell command
+line. Sometimes, the underlying Pacemaker grammar uses characters
+that have special meaning in bash and will need to be quoted. This
+includes the hash or pound sign (`#`), single and double quotes, and
+any significant whitespace.
+
+Whitespace is also significant when assigning values, meaning that
++key=value+ is different from +key = value+.
+
+Commands can be referenced using short-hand as long as the short-hand
+is unique. This can be either a prefix of the command name or a prefix
+string of characters found in the name.
+
+For example, +status+ can be abbreviated as +st+ or +su+, and
++configure+ as +conf+ or +cfg+.
+
+The syntax for the commands is given below in an informal, BNF-like
+grammar.
+
+* `<value>` denotes a string.
+* `[value]` means that the construct is optional.
+* The ellipsis (`...`) signifies that the previous construct may be
+ repeated.
+* `first|second` means either first or second.
+* The rest are literals (strings, `:`, `=`).
+
+[[cmdhelp_root_status,Cluster status]]
+=== `status`
+
+Show cluster status. The status is displayed by `crm_mon`. Supply
+additional arguments for more information or a different format.
+See `crm_mon(8)` for more details.
+
+Example:
+...............
+status
+status simple
+status full
+...............
+
+Usage:
+...............
+status [<option> ...]
+
+option :: full
+ | bynode
+ | inactive
+ | ops
+ | timing
+ | failcounts
+ | verbose
+ | quiet
+ | html
+ | xml
+ | simple
+ | tickets
+ | noheaders
+ | detail
+ | brief
+...............
+
+[[cmdhelp_root_verify,Verify cluster status]]
+=== `verify`
+
+Performs basic checks for the cluster configuration and
+current status, reporting potential issues.
+
+See `crm_verify(8)` and `crm_simulate(8)` for more details.
+
+Example:
+...............
+verify
+verify scores
+...............
+
+Usage:
+...............
+verify [scores]
+...............
+
+
+[[cmdhelp_cluster,Cluster setup and management]]
+=== `cluster` - Cluster setup and management
+
+Whole-cluster configuration management with High Availability
+awareness.
+
+The commands on the cluster level allow configuration and
+modification of the underlying cluster infrastructure, and also
+supply tools to do whole-cluster systems management.
+
+These commands enable easy installation and maintenance of an HA
+cluster, by providing support for package installation, configuration
+of the cluster messaging layer, file system setup and more.
+
+[[cmdhelp_cluster_add,Add a new node to the cluster]]
+==== `add`
+
+This command simplifies the process of adding a new node to a running
+cluster. The new node will be installed and configured with the
+packages and configuration files needed to run the cluster
+resources. If a cluster file system is used, the new node will be set
+up to host the file system.
+
+This command should be executed from a node already in the cluster.
+
+Usage:
+...............
+add <node>
+...............
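+
+Example (with a hypothetical node name):
+...............
+add node-3
+...............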
+
+[[cmdhelp_cluster_copy,Copy file to other cluster nodes]]
+==== `copy`
+
+Copy file to other cluster nodes.
+
+Copies the given file to all other nodes, unless a list of nodes
+to copy to is given as argument.
+
+Usage:
+...............
+copy <filename> [nodes ...]
+...............
+
+Example:
+...............
+copy /etc/motd
+...............
+
+[[cmdhelp_cluster_diff,Diff file across cluster]]
+==== `diff`
+
+Displays the difference, if any, between a given file
+on different nodes. If the second argument is `--checksum`,
+a checksum of the file will be calculated and displayed for
+each node.
+
+Usage:
+...............
+diff <file> [--checksum] [nodes...]
+...............
+
+Example:
+...............
+diff /etc/crm/crm.conf node2
+diff /etc/resolv.conf --checksum
+...............
+
+[[cmdhelp_cluster_health,Cluster health check]]
+==== `health`
+
+Runs a larger set of tests and queries on all nodes in the cluster to
+verify the general system health and detect potential problems.
+
+Usage:
+...............
+health
+...............
+
+[[cmdhelp_cluster_init,Initializes a new HA cluster]]
+==== `init`
+
+Installs and configures a basic HA cluster on a set of nodes.
+
+Usage:
+........
+init node1 node2 node3
+init --dry-run node1 node2 node3
+........
+
+[[cmdhelp_cluster_remove,Remove a node from the cluster]]
+==== `remove`
+
+This command simplifies the process of removing a node from the
+cluster, moving any resources hosted by that node to other nodes.
+
+Usage:
+...............
+remove <node>
+...............
+
+[[cmdhelp_cluster_run,Execute an arbitrary command on all nodes]]
+==== `run`
+
+This command takes a shell statement as argument, executes that
+statement on all nodes in the cluster, and reports the result.
+
+Usage:
+...............
+run <command>
+...............
+
+Example:
+...............
+run "cat /proc/uptime"
+...............
+
+[[cmdhelp_cluster_start,Start cluster services]]
+==== `start`
+
+Starts the cluster-related system services on this node.
+
+Usage:
+.........
+start
+.........
+
+[[cmdhelp_cluster_status,Cluster status check]]
+==== `status`
+
+Reports the status for the cluster messaging layer on the local
+node.
+
+Usage:
+...............
+status
+...............
+
+[[cmdhelp_cluster_stop,Stop cluster services]]
+==== `stop`
+
+Stops the cluster-related system services on this node.
+
+Usage:
+.........
+stop
+.........
+
+[[cmdhelp_cluster_wait_for_startup,Wait for cluster to start]]
+==== `wait_for_startup`
+
+Mostly useful in scripts or automated workflows, this command will
+attempt to connect to the local cluster node repeatedly. The command
+will keep trying until the cluster node responds, or the `timeout`
+elapses. The timeout can be changed by supplying a value in seconds as
+an argument.
+
+Usage:
+........
+wait_for_startup [<timeout>]
+........
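+
+Example (timeout in seconds):
+........
+wait_for_startup 30
+........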
+
+[[cmdhelp_script,Cluster script management]]
+=== `script` - Cluster script management
+
+A big part of the configuration and management of a cluster is
+collecting information about all cluster nodes and deploying changes
+to those nodes. Often, just performing the same procedure on all nodes
+will run into problems, due to subtle differences in the
+configuration.
+
+For example, when configuring a cluster for the first time, the
+software needs to be installed and configured on all nodes before the
+cluster software can be launched and configured using `crmsh`. This
+process is cumbersome and error-prone, and the goal is for scripts to
+make this process easier.
+
+Scripts are implemented using the Python `parallax` package, which
+provides a thin wrapper on top of SSH. This allows the scripts to
+function through the usual SSH channels used for system maintenance,
+requiring no additional software to be installed or maintained.
+
+[[cmdhelp_script_json,JSON API for cluster scripts]]
+==== `json`
+
+This command provides a JSON API for the cluster scripts, intended for
+use in user interface tools that want to interact with the cluster via
+scripts.
+
+The command takes a single argument, which should be a JSON array with
+the first member identifying the command to perform.
+
+The output is line-based: Commands that return multiple results will
+return them line-by-line, ending with a terminator value: "end".
+
+When providing parameter values to this command, they should be
+provided as nested objects, so +virtual-ip:ip=192.168.0.5+ on the
+command line becomes the JSON object
++{"virtual-ip":{"ip":"192.168.0.5"}}+.
+
+API:
+........
+["list"]
+=> [{name, shortdesc, category}]
+
+["show", <name>]
+=> [{name, shortdesc, longdesc, category, <<steps>>}]
+
+<<steps>> := [{name, shortdesc, longdesc, required, parameters, steps}]
+
+<<params>> := [{name, shortdesc, longdesc, required, unique, advanced,
+ type, value, example}]
+
+["verify", <name>, <<values>>]
+=> [{shortdesc, longdesc, text, nodes}]
+
+["run", <name>, <<values>>]
+=> [{shortdesc, rc, output|error}]
+........
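+
+Example (illustrative; the JSON array must be quoted so that it is
+passed as a single argument):
+........
+json '["list"]'
+json '["show", "virtual-ip"]'
+........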
+
+
+[[cmdhelp_script_list,List available scripts]]
+==== `list`
+
+Lists the available scripts, sorted by category. Scripts that have the
+special `Script` category are hidden by default, since they are mainly
+used by other scripts or commands. To also show these, pass `all` as
+argument.
+
+To get a flat list of script names, not sorted by category, pass
+`names` as an extra argument.
+
+Usage:
+............
+list [all] [names]
+............
+
+Example:
+............
+list
+list all names
+............
+
+[[cmdhelp_script_run,Run the script]]
+==== `run`
+
+Given a list of parameter values, this command will execute the
+actions specified by the cluster script. The format for the parameter
+values is the same as for the `verify` command.
+
+Two parameters are accepted by all scripts:
+
+* `nodes=<nodes>`: List of nodes that the script runs over
+* `dry_run=yes|no`: If set to `yes`, the script will not perform any modifications.
+
+Additional parameters may be available depending on the script.
+
+Use the `show` command to see what parameters are available.
+
+Usage:
+.............
+run <script> [args...]
+.............
+
+Example:
+.............
+run apache install=true
+run sbd id=sbd-1 node=node1 sbd_device=/dev/disk/by-uuid/F00D-CAFE
+.............
+
+[[cmdhelp_script_show,Describe the script]]
+==== `show`
+
+Prints a description and short summary of the script, with
+descriptions of the accepted parameters.
+
+Advanced parameters are hidden by default. To show the complete list
+of parameters accepted by the script, pass `all` as argument.
+
+Usage:
+............
+show <script> [all]
+............
+
+Example:
+............
+show virtual-ip
+............
+
+[[cmdhelp_script_verify,Verify the script]]
+==== `verify`
+
+Checks the given parameter values, and returns the list
+of actions that would be executed when running the script
+with the same list of parameter values.
+
+Usage:
+............
+verify <script> [args...]
+............
+
+Example:
+............
+verify sbd id=sbd-1 node=node1 sbd_device=/dev/disk/by-uuid/F00D-CAFE
+............
+
+[[cmdhelp_corosync,Corosync management]]
+=== `corosync` - Corosync management
+
+Corosync is the underlying messaging layer for most HA clusters.
+This level provides commands for editing and managing the corosync
+configuration.
+
+[[cmdhelp_corosync_add-node,Add a corosync node]]
+==== `add-node`
+
+Adds a node to the corosync configuration. This is used with the `udpu`
+type configuration in corosync.
+
+A nodeid for the added node is generated automatically.
+
+Note that this command assumes that only a single ring is used, and
+sets only the address for ring0.
+
+Usage:
+.........
+add-node <addr> [name]
+.........
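+
+Example (hypothetical address and node name):
+.........
+add-node 10.0.0.5 node-5
+.........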
+
+[[cmdhelp_corosync_del-node,Remove a corosync node]]
+==== `del-node`
+
+Removes a node from the corosync configuration. The argument given is
+the `ring0_addr` address set in the configuration file.
+
+Usage:
+.........
+del-node <addr>
+.........
+
+[[cmdhelp_corosync_diff,Diffs the corosync configuration]]
+==== `diff`
+
+Diffs the corosync configurations on different nodes. If no nodes are
+given as arguments, the corosync configurations on all nodes in the
+cluster are compared.
+
+`diff` takes an optional argument `--checksum`, to display a checksum
+for each file instead of calculating a diff.
+
+Usage:
+.........
+diff [--checksum] [node...]
+.........
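+
+Example (with a hypothetical node name):
+.........
+diff node-2
+diff --checksum
+.........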
+
+[[cmdhelp_corosync_edit,Edit the corosync configuration]]
+==== `edit`
+
+Opens the Corosync configuration file in an editor.
+
+Usage:
+.........
+edit
+.........
+
+[[cmdhelp_corosync_get,Get a corosync configuration value]]
+==== `get`
+
+Returns the value configured in `corosync.conf`, which is not
+necessarily the value used in the running configuration. See `reload`
+for telling corosync about configuration changes.
+
+The argument is the complete dot-separated path to the value.
+
+If there are multiple values configured with the same path, the
+command returns all values for that path. For example, to get all
+configured `ring0_addr` values, use this command:
+
+Example:
+........
+get nodelist.node.ring0_addr
+........
+
+[[cmdhelp_corosync_log,Show the corosync log file]]
+==== `log`
+
+Opens the log file specified in the corosync configuration file. If no
+log file is configured, this command returns an error.
+
+The pager used can be configured either using the PAGER
+environment variable or in `crm.conf`.
+
+Usage:
+.........
+log
+.........
+
+[[cmdhelp_corosync_pull,Pulls the corosync configuration]]
+==== `pull`
+
+Gets the corosync configuration from another node and copies
+it to this node.
+
+Usage:
+.........
+pull <node>
+.........
+
+[[cmdhelp_corosync_push,Push the corosync configuration]]
+==== `push`
+
+Pushes the corosync configuration file on this node to
+the list of nodes provided. If no target nodes are given,
+the configuration is pushed to all other nodes in the cluster.
+
+It is recommended to use `csync2` to distribute the cluster
+configuration files rather than relying on this command.
+
+Usage:
+.........
+push [node] ...
+.........
+
+Example:
+.........
+push node-2 node-3
+.........
+
+[[cmdhelp_corosync_reload,Reload the corosync configuration]]
+==== `reload`
+
+Tells all instances of corosync in this cluster to reload
+`corosync.conf`.
+
+After pushing a new configuration to all cluster nodes, call this
+command to make corosync use the new configuration.
+
+Usage:
+.........
+reload
+.........
+
+[[cmdhelp_corosync_set,Set a corosync configuration value]]
+==== `set`
+
+Sets the value identified by the given path. If the value does not
+exist in the configuration file, it will be added. However, if the
+section containing the value does not exist, the command will fail.
+
+Usage:
+.........
+set <path> <value>
+.........
+
+Example:
+.........
+set quorum.expected_votes 2
+.........
+
+[[cmdhelp_corosync_show,Display the corosync configuration]]
+==== `show`
+
+Displays the corosync configuration on the current node.
+
+Usage:
+.........
+show
+.........
+
+[[cmdhelp_corosync_status,Display the corosync status]]
+==== `status`
+
+Displays the status of Corosync, including the votequorum state.
+
+Usage:
+.........
+status
+.........
+
+[[cmdhelp_cib,CIB shadow management]]
+=== `cib` - CIB shadow management
+
+This level is for management of shadow CIBs. It is available both
+at the top level and the `configure` level.
+
+All the commands are implemented using `cib_shadow(8)` and the
+`CIB_shadow` environment variable. The user prompt always
+includes the name of the currently active shadow or the live CIB.
+
+[[cmdhelp_cib_cibstatus,CIB status management and editing]]
+==== `cibstatus`
+
+Enter the CIB status section level, where the status section can be
+edited and managed. See the
+<<cmdhelp_cibstatus,CIB status management section>>.
+
+[[cmdhelp_cib_commit,copy a shadow CIB to the cluster]]
+==== `commit`
+
+Apply a shadow CIB to the cluster. If the shadow name is omitted
+then the current shadow CIB is applied.
+
+Temporary shadow CIBs are removed automatically on commit.
+
+Usage:
+...............
+commit [<cib>]
+...............
+
+[[cmdhelp_cib_delete,delete a shadow CIB]]
+==== `delete`
+
+Delete an existing shadow CIB.
+
+Usage:
+...............
+delete <cib>
+...............
+
+[[cmdhelp_cib_diff,diff between the shadow CIB and the live CIB]]
+==== `diff`
+
+Print differences between the current cluster configuration and
+the active shadow CIB.
+
+Usage:
+...............
+diff
+...............
+
+[[cmdhelp_cib_import,import a CIB or PE input file to a shadow]]
+==== `import`
+
+At times it may be useful to create a shadow file from the
+existing CIB. The CIB may be specified as a file or as a PE input
+file number. The shell will look up files in the local directory
+first and then in the PE directory (typically `/var/lib/pengine`).
+Once the CIB file is found, it is copied to a shadow and this
+shadow is immediately available for use at both `configure` and
+`cibstatus` levels.
+
+If the shadow name is omitted then the target shadow is named
+after the input CIB file.
+
+Note that there is often more than one PE input file, so you may
+need to specify the full name.
+
+Usage:
+...............
+import {<file>|<number>} [<shadow>]
+...............
+Examples:
+...............
+import pe-warn-2222
+import 2289 issue2
+...............
+
+[[cmdhelp_cib_list,list all shadow CIBs]]
+==== `list`
+
+List existing shadow CIBs.
+
+Usage:
+...............
+list
+...............
+
+[[cmdhelp_cib_new,create a new shadow CIB]]
+==== `new`
+
+Create a new shadow CIB. The live cluster configuration and
+status is copied to the shadow CIB.
+
+If the name of the shadow is omitted, a temporary shadow CIB is
+created. This is useful when multiple level sessions are desired
+without affecting the cluster. A temporary shadow CIB is short-lived
+and will be removed either on `commit` or on program exit.
+Note that if the temporary shadow is not committed, all changes in
+the temporary shadow are lost.
+
+Specify `withstatus` if you want to edit the status section of
+the shadow CIB (see the <<cmdhelp_cibstatus,cibstatus section>>).
+Add `force` to force overwriting the existing shadow CIB.
+
+To start with an empty configuration that is not copied from the live
+CIB, specify the `empty` keyword. (This also allows a shadow CIB to be
+created in case no cluster is running.)
+
+Usage:
+...............
+new [<cib>] [withstatus] [force] [empty]
+...............
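+
+Example (with an illustrative shadow name):
+...............
+new test-2
+new test-2 withstatus force
+...............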
+
+[[cmdhelp_cib_reset,copy live cib to a shadow CIB]]
+==== `reset`
+
+Copy the current cluster configuration into the shadow CIB.
+
+Usage:
+...............
+reset <cib>
+...............
+
+[[cmdhelp_cib_use,change working CIB]]
+==== `use`
+
+Choose a CIB source. If you want to edit the status from the
+shadow CIB, specify `withstatus` (see <<cmdhelp_cibstatus,`cibstatus`>>).
+Leave out the CIB name to switch to the running CIB.
+
+Usage:
+...............
+use [<cib>] [withstatus]
+...............
+
+[[cmdhelp_ra,Resource Agents (RA) lists and documentation]]
+=== `ra` - Resource Agents (RA) lists and documentation
+
+This level contains commands which show various information about
+the installed resource agents. It is available both at the top
+level and at the `configure` level.
+
+[[cmdhelp_ra_classes,list classes and providers]]
+==== `classes`
+
+Print all resource agents' classes and, where appropriate, a list
+of available providers.
+
+Usage:
+...............
+classes
+...............
+
+[[cmdhelp_ra_info,show meta data for a RA]]
+==== `info` (`meta`)
+
+Show the meta-data of a resource agent type. This is where users
+can find information on how to use a resource agent. It is also
+possible to get information from some programs: `pengine`,
+`crmd`, `cib`, and `stonithd`. Just specify the program name
+instead of an RA.
+
+Usage:
+...............
+info [<class>:[<provider>:]]<type>
+info <type> <class> [<provider>] (obsolete)
+...............
+Example:
+...............
+info apache
+info ocf:pacemaker:Dummy
+info stonith:ipmilan
+info pengine
+...............
+
+[[cmdhelp_ra_list,list RA for a class (and provider)]]
+==== `list`
+
+List available resource agents for the given class. If the class
+is `ocf`, supply a provider to get agents which are available
+only from that provider.
+
+Usage:
+...............
+list <class> [<provider>]
+...............
+Example:
+...............
+list ocf pacemaker
+...............
+
+[[cmdhelp_ra_providers,show providers for a RA and a class]]
+==== `providers`
+
+List providers for a resource agent type. The class parameter
+defaults to `ocf`.
+
+Usage:
+...............
+providers <type> [<class>]
+...............
+Example:
+...............
+providers apache
+...............
+
+[[cmdhelp_ra_validate,validate parameters for RA]]
+==== `validate`
+
+If the resource agent supports the `validate-all` action, this calls
+the action with the given parameters, printing any warnings or errors
+reported by the agent.
+
+Usage:
+................
+validate <agent> [<key>=<value> ...]
+................
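+
+Example (illustrative agent and parameter):
+................
+validate IPaddr2 ip=192.168.1.100
+................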
+
+[[cmdhelp_resource,Resource management]]
+=== `resource` - Resource management
+
+At this level resources may be managed.
+
+All (or almost all) commands are implemented with the CRM tools
+such as `crm_resource(8)`.
+
+[[cmdhelp_resource_ban,ban a resource from a node]]
+==== `ban`
+
+Ban a resource from running on a certain node. If no node is given
+as argument, the resource is banned from the current location.
+
+See `move` for details on other arguments.
+
+Usage:
+...............
+ban <rsc> [<node>] [<lifetime>] [force]
+...............
+
+[[cmdhelp_resource_cleanup,cleanup resource status]]
+==== `cleanup`
+
+Cleanup resource status. Typically done after the resource has
+temporarily failed. If a node is omitted, cleanup on all nodes.
+If there are many nodes, the command may take a while.
+
++(Pacemaker 1.1.14)+ Pass +force+ to clean up the resource itself;
+otherwise, the cleanup command will apply to the parent resource (if
+any).
+
+Usage:
+...............
+cleanup <rsc> [<node>] [force]
+...............
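+
+Example (illustrative resource and node names):
+...............
+cleanup webserver
+cleanup webserver node-2 force
+...............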
+
+[[cmdhelp_resource_clear,Clear any relocation constraint]]
+==== `clear` (`unmove`, `unmigrate`, `unban`)
+
+Remove any relocation constraint created by
+the `move`, `migrate` or `ban` command.
+
+Usage:
+...............
+clear <rsc>
+unmigrate <rsc>
+unban <rsc>
+...............
+
+[[cmdhelp_resource_constraints,Show constraints affecting a resource]]
+==== `constraints`
+
+Display the location and colocation constraints affecting the
+resource.
+
+Usage:
+................
+constraints <rsc>
+................
+
+[[cmdhelp_resource_demote,demote a master-slave resource]]
+==== `demote`
+
+Demote a master-slave resource using the `target-role`
+attribute.
+
+Usage:
+...............
+demote <rsc>
+...............
+
+[[cmdhelp_resource_failcount,manage failcounts]]
+==== `failcount`
+
+Show/edit/delete the failcount of a resource.
+
+Usage:
+...............
+failcount <rsc> set <node> <value>
+failcount <rsc> delete <node>
+failcount <rsc> show <node>
+...............
+Example:
+...............
+failcount fs_0 delete node2
+...............
+
+[[cmdhelp_resource_locate,show the location of resources]]
+==== `locate`
+
+Show the current location of one or more resources.
+
+Usage:
+...............
+locate [<rsc> ...]
+...............
+
+[[cmdhelp_resource_maintenance,Enable/disable per-resource maintenance mode]]
+==== `maintenance`
+
+Enables or disables the per-resource maintenance mode. When this mode
+is enabled, no monitor operations will be triggered for the resource.
+The `maintenance` attribute conflicts with `is-managed`. When setting
+the `maintenance` attribute, the user is prompted to remove the
+`is-managed` attribute if it exists.
+
+Usage:
+..................
+maintenance <resource> [on|off|true|false]
+..................
+
+Example:
+..................
+maintenance rsc1
+maintenance rsc2 off
+..................
+
+[[cmdhelp_resource_manage,put a resource into managed mode]]
+==== `manage`
+
+Manage a resource using the `is-managed` attribute. If there
+are multiple meta attribute sets, the attribute is set in all of
+them. If the resource is a clone, all `is-managed` attributes are
+removed from the child resources.
+The `is-managed` attribute conflicts with `maintenance`. When setting
+the `is-managed` attribute, the user is prompted to remove the
+`maintenance` attribute if it exists.
+
+For details on group management see <<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+manage <rsc>
+...............
+
+[[cmdhelp_resource_meta,manage a meta attribute]]
+==== `meta`
+
+Show/edit/delete a meta attribute of a resource. Note that many
+meta attributes can also be managed with other commands; for
+example, `resource stop` sets the `target-role` attribute.
+
+Usage:
+...............
+meta <rsc> set <attr> <value>
+meta <rsc> delete <attr>
+meta <rsc> show <attr>
+...............
+Example:
+...............
+meta ip_0 set target-role stopped
+...............
+
+[[cmdhelp_resource_move,Move a resource to another node]]
+==== `move` (`migrate`)
+
+Move a resource away from its current location.
+
+If the destination node is left out, the resource is migrated by
+creating a constraint which prevents it from running on the current
+node. For this type of constraint to be created, the +force+ argument
+is required.
+
+A lifetime may be given for the constraint. Once it expires, the
+location constraint will no longer be active.
+
+Usage:
+...............
+move <rsc> [<node>] [<lifetime>] [force]
+...............
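+
+Example (illustrative resource and node names):
+...............
+move webserver node-2
+move webserver force
+...............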
+
+[[cmdhelp_resource_operations,Show active resource operations]]
+==== `operations`
+
+Show active operations, optionally filtered by resource and node.
+
+Usage:
+................
+operations [<rsc>] [<node>]
+................
+
+[[cmdhelp_resource_param,manage a parameter of a resource]]
+==== `param`
+
+Show/edit/delete a parameter of a resource.
+
+Usage:
+...............
+param <rsc> set <param> <value>
+param <rsc> delete <param>
+param <rsc> show <param>
+...............
+Example:
+...............
+param ip_0 show ip
+...............
+
+[[cmdhelp_resource_promote,promote a master-slave resource]]
+==== `promote`
+
+Promote a master-slave resource using the `target-role`
+attribute.
+
+Usage:
+...............
+promote <rsc>
+...............
+
+[[cmdhelp_resource_refresh,refresh CIB from the LRM status]]
+==== `refresh`
+
+Refresh CIB from the LRM status.
+
+.Note
+****************************
+`refresh` has been deprecated and is now
+an alias for `cleanup`.
+****************************
+
+Usage:
+...............
+refresh [<node>]
+...............
+
+[[cmdhelp_resource_reprobe,probe for resources not started by the CRM]]
+==== `reprobe`
+
+Probe for resources not started by the CRM.
+
+.Note
+****************************
+`reprobe` has been deprecated and is now
+an alias for `cleanup`.
+****************************
+
+Usage:
+...............
+reprobe [<node>]
+...............
+
+[[cmdhelp_resource_restart,restart resources]]
+==== `restart`
+
+Restart one or more resources. This is essentially a shortcut for
+resource stop followed by a start. The shell first waits for the stop
+to finish, that is, for all resources to actually stop, and only then
+orders the start action. Since this command entails a whole set of
+operations, informational messages are printed to show progress.
+
+For details on group management see
+<<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+restart <rsc> [<rsc> ...]
+...............
+Example:
+...............
+# crm resource restart g_webserver
+INFO: ordering g_webserver to stop
+waiting for stop to finish .... done
+INFO: ordering g_webserver to start
+#
+...............
+
+[[cmdhelp_resource_scores,Display resource scores]]
+==== `scores`
+
+Display the allocation scores for all resources.
+
+Usage:
+................
+scores
+................
+
+[[cmdhelp_resource_secret,manage sensitive parameters]]
+==== `secret`
+
+Sensitive parameters can be kept in local files rather than in the
+CIB in order to prevent accidental data exposure. Use the `secret`
+command to manage such parameters. `stash` and `unstash` move the
+value from the CIB to a local file and back again, respectively. The
+`set` subcommand sets the parameter to the provided value. `delete`
+removes the parameter completely. `show` displays the value of
+the parameter from the local file. Use `check` to verify whether the
+local file content is valid.
+
+Usage:
+...............
+secret <rsc> set <param> <value>
+secret <rsc> stash <param>
+secret <rsc> unstash <param>
+secret <rsc> delete <param>
+secret <rsc> show <param>
+secret <rsc> check <param>
+...............
+Example:
+...............
+secret fence_1 show password
+secret fence_1 stash password
+secret fence_1 set password secret_value
+...............
+
+[[cmdhelp_resource_start,start resources]]
+==== `start`
+
+Start one or more resources by setting the `target-role` attribute. If
+there are multiple meta attribute sets, the attribute is set in all
+of them. If the resource is a clone, all `target-role` attributes are
+removed from the child resources.
+
+For details on group management see
+<<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+start <rsc> [<rsc> ...]
+...............
+
+[[cmdhelp_resource_status,show status of resources]]
+==== `status` (`show`, `list`)
+
+Print resource status. More than one resource can be shown at once. If
+the resource parameter is left out, the status of all resources is
+printed.
+
+Usage:
+...............
+status [<rsc> ...]
+...............
+
+[[cmdhelp_resource_stop,stop resources]]
+==== `stop`
+
+Stop one or more resources using the `target-role` attribute. If there
+are multiple meta attribute sets, the attribute is set in all of
+them. If the resource is a clone, all `target-role` attributes are
+removed from the child resources.
+
+For details on group management see
+<<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+stop <rsc> [<rsc> ...]
+...............
+
+[[cmdhelp_resource_trace,start RA tracing]]
+==== `trace`
+
+Start tracing the RA for the given operation. The trace files are
+stored in `$HA_VARLIB/trace_ra`. Note that if the operation to be
+traced is monitor, the number of trace files can grow very
+quickly.
+
+If no operation name is given, crmsh will attempt to trace all
+operations for the RA. This includes any configured operations, start
+and stop as well as promote/demote for multistate resources.
+
+To trace the probe operation which exists for all resources, either
+set a trace for `monitor` with interval `0`, or use `probe` as the
+operation name.
+
+Usage:
+...............
+trace <rsc> [<op> [<interval>] ]
+...............
+Example:
+...............
+trace fs start
+trace webserver
+trace webserver probe
+trace fs monitor 0
+...............
+
+[[cmdhelp_resource_unmanage,put a resource into unmanaged mode]]
+==== `unmanage`
+
+Unmanage a resource using the `is-managed` attribute. If there
+are multiple meta attribute sets, the attribute is set in all of
+them. If the resource is a clone, all `is-managed` attributes are
+removed from the child resources.
+
+For details on group management see <<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+unmanage <rsc>
+...............
+
+[[cmdhelp_resource_untrace,stop RA tracing]]
+==== `untrace`
+
+Stop tracing the RA for the given operation. If no operation name is
+given, crmsh will attempt to stop tracing all operations for the resource.
+
+Usage:
+...............
+untrace <rsc> [<op> [<interval>] ]
+...............
+Example:
+...............
+untrace fs start
+untrace webserver
+...............
+
+[[cmdhelp_resource_utilization,manage a utilization attribute]]
+==== `utilization`
+
+Show/edit/delete a utilization attribute of a resource. These
+attributes describe hardware requirements. By setting the
+`placement-strategy` cluster property appropriately, it is
+possible then to distribute resources based on resource
+requirements and node size. See also <<cmdhelp_node_utilization,node utilization attributes>>.
+
+Usage:
+...............
+utilization <rsc> set <attr> <value>
+utilization <rsc> delete <attr>
+utilization <rsc> show <attr>
+...............
+Example:
+...............
+utilization xen1 set memory 4096
+...............
+
+[[cmdhelp_node,Node management]]
+=== `node` - Node management
+
+Node management and status commands.
+
+[[cmdhelp_node_attribute,manage attributes]]
+==== `attribute`
+
+Edit node attributes. This kind of attribute should refer to
+relatively static properties, such as memory size.
+
+Usage:
+...............
+attribute <node> set <attr> <value>
+attribute <node> delete <attr>
+attribute <node> show <attr>
+...............
+Example:
+...............
+attribute node_1 set memory_size 4096
+...............
+
+[[cmdhelp_node_clearstate,Clear node state]]
+==== `clearstate`
+
+Resets and clears the state of the specified node. This node is
+afterwards assumed clean and offline. This command can be used to
+manually confirm that a node has been fenced (e.g., powered off).
+
+Be careful! This can cause data corruption if you confirm that a node
+is down when it is, in fact, not cleanly down: the cluster will proceed
+as if the fence had succeeded, possibly starting resources multiple times.
+
+Usage:
+...............
+clearstate <node>
+...............
+
+[[cmdhelp_node_delete,delete node]]
+==== `delete`
+
+Delete a node. This command will remove the node from the CIB
+and, in case the cluster stack is running, use the appropriate
+program (`crm_node` or `hb_delnode`) to remove the node from the
+membership.
+
+If the node is still listed as active and a member of our
+partition, we refuse to remove it. With the global force option
+(`-F`), we will try to delete the node anyway.
+
+Usage:
+...............
+delete <node>
+...............
+
+[[cmdhelp_node_fence,fence node]]
+==== `fence`
+
+Make CRM fence a node. This functionality depends on stonith
+resources capable of fencing the specified node. If no such stonith
+resources exist, no fencing will happen.
+
+Usage:
+...............
+fence <node>
+...............
+
+[[cmdhelp_node_maintenance,put node into maintenance mode]]
+==== `maintenance`
+
+Set the node status to maintenance. This is equivalent to the
+cluster-wide `maintenance-mode` property but puts just one node
+into the maintenance mode. If there are resources in maintenance
+mode on the node, the user will be prompted to remove the maintenance
+property from them.
+
+The node parameter defaults to the node where the command is run.
+
+Usage:
+...............
+maintenance [<node>]
+...............
+
+[[cmdhelp_node_online,set node online]]
+==== `online`
+
+Set a node to online status.
+
+The node parameter defaults to the node where the command is run.
+
+Usage:
+...............
+online [<node>]
+...............
+
+[[cmdhelp_node_ready,put node into ready mode]]
+==== `ready`
+
+Set the node's maintenance status to `off`. The node should now
+again be fully operational and capable of running resource
+operations.
+
+The node parameter defaults to the node where the command is run.
+
+Usage:
+...............
+ready [<node>]
+...............
+
+[[cmdhelp_node_server,show node hostname or server address]]
+==== `server`
+
+Remote nodes may have a configured server address which should
+be used when contacting the node. This command prints the
+server address if configured, else the node name.
+
+If no parameter is given, the addresses or names for all nodes
+are printed.
+
+Usage:
+...............
+server [<node> ...]
+...............
+
+[[cmdhelp_node_show,show node]]
+==== `show`
+
+Show a node definition. If the node parameter is omitted then all
+nodes are shown.
+
+Usage:
+...............
+show [<node>]
+...............
+
+[[cmdhelp_node_standby,put node into standby]]
+==== `standby`
+
+Set a node to standby status. The node parameter defaults to the
+node where the command is run.
+
+Additionally, you may specify a lifetime for the standby: if set to
+`reboot`, the node will be back online once it reboots, while `forever`
+will keep the node in standby after reboot. The lifetime defaults to
+`forever`.
+
+Usage:
+...............
+standby [<node>] [<lifetime>]
+
+lifetime :: reboot | forever
+...............
+
+Example:
+...............
+standby bob reboot
+...............
+
+
+[[cmdhelp_node_status,show nodes' status as XML]]
+==== `status`
+
+Show nodes' status as XML. If the node parameter is omitted then
+all nodes are shown.
+
+Usage:
+...............
+status [<node>]
+...............
+
+[[cmdhelp_node_status-attr,manage status attributes]]
+==== `status-attr`
+
+Edit node attributes which are in the CIB status section, i.e.
+attributes which hold properties of a more volatile nature. One
+typical example is the attribute generated by the `pingd` utility.
+
+Usage:
+...............
+status-attr <node> set <attr> <value>
+status-attr <node> delete <attr>
+status-attr <node> show <attr>
+...............
+Example:
+...............
+status-attr node_1 show pingd
+...............
+
+[[cmdhelp_node_utilization,manage utilization attributes]]
+==== `utilization`
+
+Edit node utilization attributes. These attributes describe
+hardware characteristics as integer numbers such as memory size
+or the number of CPUs. By setting the `placement-strategy`
+cluster property appropriately, it is possible then to distribute
+resources based on resource requirements and node size. See also
+<<cmdhelp_resource_utilization,resource utilization attributes>>.
+
+Usage:
+...............
+utilization <node> set <attr> <value>
+utilization <node> delete <attr>
+utilization <node> show <attr>
+...............
+Examples:
+...............
+utilization node_1 set memory 16384
+utilization node_1 show cpu
+...............
+
+[[cmdhelp_site,GEO clustering site support]]
+=== `site` - GEO clustering site support
+
+A cluster may consist of two or more subclusters in different and
+distant locations. This set of commands supports such setups.
+
+[[cmdhelp_site_ticket,manage site tickets]]
+==== `ticket`
+
+Tickets are cluster-wide attributes. They can be managed at the
+site where this command is executed.
+
+It is then possible to constrain resources depending on the
+ticket availability (see the <<cmdhelp_configure_rsc_ticket,`rsc_ticket`>> command
+for more details).
+
+Usage:
+...............
+ticket {grant|revoke|standby|activate|show|time|delete} <ticket>
+...............
+Example:
+...............
+ticket grant ticket1
+...............
+
+[[cmdhelp_options,User preferences]]
+=== `options` - User preferences
+
+The user may set various options for the crm shell itself.
+
+[[cmdhelp_options_add-quotes,add quotes around parameters containing spaces]]
+==== `add-quotes`
+
+The shell (as in `/bin/sh`) parser strips quotes from the command
+line. This may sometimes make it really difficult to type values
+which contain white space. One typical example is the `configure
+filter` command. The crm shell will supply extra quotes around
+arguments which contain white space. The default is `yes`.
+
+.Note on quotes use
+****************************
+Automatically adding quotes around arguments was introduced in
+version 1.2.2 and is technically a regression; that is the only
+reason the `add-quotes` option exists. If you have custom shell
+scripts which would break, just set the `add-quotes` option to `no`.
+
+For instance, with adding quotes enabled, it is possible to do
+the following:
+...............
+# crm configure primitive d1 Dummy \
+ meta description="some description here"
+# crm configure filter 'sed "s/hostlist=./&node-c /"' fencing
+...............
+****************************
+
+[[cmdhelp_options_check-frequency,when to perform semantic check]]
+==== `check-frequency`
+
+A semantic check of the CIB, or of elements modified or created, may
+be done on every configuration change (`always`), when verifying
+(`on-verify`), or `never`. It is by default set to `always`.
+Experts may want to change the setting to `on-verify`.
+
+The checks require that resource agents are present. If they are
+not installed at configuration time, set this preference to
+`never`.
+
+See <<topics_Features_Checks,Configuration semantic checks>> for more details.
+
+[[cmdhelp_options_check-mode,how to treat semantic errors]]
+==== `check-mode`
+
+A semantic check of the CIB, or of elements modified or created, may
+be done in `strict` mode or in `relaxed` mode. In the former,
+certain problems are treated as configuration errors; in the
+`relaxed` mode, all are treated as warnings. The default is `strict`.
+
+See <<topics_Features_Checks,Configuration semantic checks>> for more details.
+
+[[cmdhelp_options_colorscheme,set colors for output]]
+==== `colorscheme`
+
+With `output` set to `color`, a comma-separated list of colors
+from this option is used to emphasize:
+
+- keywords
+- object ids
+- attribute names
+- attribute values
+- scores
+- resource references
+
+`crm` can show colors only if curses support for Python is
+installed (usually provided by the `python-curses` package). The
+colors are whatever is available in your terminal. Use `normal`
+if you want to keep the default foreground color.
+
+This user preference defaults to
+`yellow,normal,cyan,red,green,magenta`, which is good for
+terminals with a dark background. You may want to change the color
+scheme and save it in the preferences file for other color
+setups.
+
+Example:
+...............
+colorscheme yellow,normal,blue,red,green,magenta
+...............
+
+[[cmdhelp_options_editor,set preferred editor program]]
+==== `editor`
+
+The `edit` command invokes an editor. Use this to specify your
+preferred editor program. If not set, it will default to either
+the value of the `EDITOR` environment variable or to one of the
+standard UNIX editors (`vi`,`emacs`,`nano`).
+
+Usage:
+...............
+editor program
+...............
+Example:
+...............
+editor vim
+...............
+
+[[cmdhelp_options_manage-children,how to handle children resource attributes]]
+==== `manage-children`
+
+Some resource management commands, such as `resource stop`, may not
+always produce the desired result when the target resource is a
+group. Each element, the group and its primitive members, can have a
+meta attribute, and those attributes may end up with conflicting
+values. Consider the following construct:
+...............
+crm(live)# configure show svc fs virtual-ip
+primitive fs Filesystem \
+ params device="/dev/drbd0" directory="/srv/nfs" fstype=ext3 \
+ op monitor interval=10s \
+ meta target-role=Started
+primitive virtual-ip IPaddr2 \
+ params ip=10.2.13.110 iflabel=1 \
+ op monitor interval=10s \
+ op start interval=0 \
+ meta target-role=Started
+group svc fs virtual-ip \
+ meta target-role=Stopped
+...............
+
+Even though the element +svc+ should be stopped, the group is
+actually running because all its members have the +target-role+
+set to +Started+:
+...............
+crm(live)# resource show svc
+resource svc is running on: xen-f
+...............
+
+Hence, if the user invokes +resource stop svc+, the intention is
+not clear. This preference gives the user an opportunity to
+better control what happens if attributes of group members have
+values which conflict with the same attribute of the group
+itself.
+
+Possible values are +ask+ (the default), +always+, and +never+.
+If set to +always+, the crm shell removes all child attributes
+which have values different from the parent. If set to +never+,
+all child attributes are left intact. Finally, if set to
++ask+, the user will be asked for each member what is to be done.
+
+[[cmdhelp_options_output,set output type]]
+==== `output`
+
+`crm` can adorn configurations in two ways: in color (similar to,
+for instance, the `ls --color` command) and by showing keywords in
+upper case. Possible values are `plain`, `color-always`, `color`,
+and `uppercase`. It is possible to combine `uppercase` with one
+of the color values in order to get an upper case xmas tree. Just
+set this option to `color,uppercase` or `color-always,uppercase`.
+If you need color codes in pipes, `color-always` forces color
+codes even when the terminal is not a tty (just like `ls
+--color=always`).
+
+[[cmdhelp_options_pager,set preferred pager program]]
+==== `pager`
+
+The `view` command displays text through a pager. Use this to
+specify your preferred pager program. If not set, it will default
+to either the value of the `PAGER` environment variable or to one
+of the standard UNIX system pagers (`less`,`more`,`pg`).
+
+[[cmdhelp_options_reset,reset user preferences to factory defaults]]
+==== `reset`
+
+This command resets all user options to the defaults. If used as
+a single-shot command, the rc file (+$HOME/.config/crm/rc+) is
+reset to the defaults too.
+
+[[cmdhelp_options_save,save the user preferences to the rc file]]
+==== `save`
+
+Save current settings to the rc file (+$HOME/.config/crm/rc+). On
+further `crm` runs, the rc file is automatically read and parsed.
+
+[[cmdhelp_options_set,Set the value of a given option]]
+==== `set`
+
+Sets the value of an option. Takes the fully qualified
+name of the option as argument, as displayed by +show all+.
+
+The modified option value is stored in the user-local
+configuration file, usually found in +~/.config/crm/crm.conf+.
+
+Usage:
+........
+set <option> <value>
+........
+
+Example:
+........
+set color.warn "magenta bold"
+set editor nano
+........
+
+[[cmdhelp_options_show,show current user preference]]
+==== `show`
+
+Display all current settings.
+
+Given an option name as argument, `show` will display only the value
+of that argument.
+
+Given +all+ as argument, `show` displays all available user options.
+
+Usage:
+........
+show [all|<option>]
+........
+
+Example:
+........
+show
+show skill-level
+show all
+........
+
+[[cmdhelp_options_skill-level,set skill level]]
+==== `skill-level`
+
+Based on the skill-level setting, the user is allowed to use only
+a subset of commands. There are three levels: operator,
+administrator, and expert. The operator level allows only
+commands at the `resource` and `node` levels, but not editing
+or deleting resources. The administrator may do that, and may also
+configure the cluster at the `configure` level and manage the
+shadow CIBs. The expert may do everything.
+
+Usage:
+...............
+skill-level <level>
+
+level :: operator | administrator | expert
+...............
+
+.Note on security
+****************************
+The `skill-level` option is advisory only. There is nothing
+stopping users from changing their skill level (see
+<<topics_Features_Security,Access Control Lists (ACL)>> on how to enforce
+access control).
+****************************
+
+[[cmdhelp_options_sort-elements,sort CIB elements]]
+==== `sort-elements`
+
+`crm` by default sorts CIB elements. If you want them to appear in
+the order they were created, set this option to `no`.
+
+Usage:
+...............
+sort-elements {yes|no}
+...............
+Example:
+...............
+sort-elements no
+...............
+
+[[cmdhelp_options_user,set the cluster user]]
+==== `user`
+
+Sufficient privileges are necessary in order to manage a
+cluster: programs such as `crm_verify` or `crm_resource` and,
+ultimately, `cibadmin` have to be run either as `root` or as the
+CRM owner user (typically `hacluster`). You don't have to worry
+about that if you run `crm` as `root`. A more secure way is to
+run the program with your usual privileges, set this option to
+the appropriate user (such as `hacluster`), and set up the
+`sudoers` file.
+
+Usage:
+...............
+user system-user
+...............
+Example:
+...............
+user hacluster
+...............
+
+[[cmdhelp_options_wait,synchronous operation]]
+==== `wait`
+
+In normal operation, `crm` runs a command and returns
+immediately to process other commands or get input from the user.
+With this option set to `yes`, it will wait for the started
+transition to finish. In interactive mode, dots are printed to
+indicate progress.
+
+Usage:
+...............
+wait {yes|no}
+...............
+Example:
+...............
+wait yes
+...............
+
+[[cmdhelp_configure,CIB configuration]]
+=== `configure` - CIB configuration
+
+This level enables all CIB object definition commands.
+
+The configuration may be logically divided into four parts:
+nodes, resources, constraints, and (cluster) properties and
+attributes. Each of these commands supports one or more basic CIB
+objects.
+
+Nodes and attributes describing nodes are managed using the
+`node` command.
+
+Commands for resources are:
+
+- `primitive`
+- `monitor`
+- `group`
+- `clone`
+- `ms`/`master` (master-slave)
+
+In order to streamline large configurations, it is possible to
+define a template which can later be referenced in primitives:
+
+- `rsc_template`
+
+In that case the primitive inherits all attributes defined in the
+template.
+
+There are three types of constraints:
+
+- `location`
+- `colocation`
+- `order`
+
+It is possible to define fencing order (stonith resource
+priorities):
+
+- `fencing_topology`
+
+Finally, there are the cluster properties, resource meta
+attribute defaults, and operation defaults. All are just sets
+of attributes. These attributes are managed by the following
+commands:
+
+- `property`
+- `rsc_defaults`
+- `op_defaults`
+
+In addition to the cluster configuration, the Access Control
+Lists (ACL) can be set up to allow access to parts of the CIB for
+users other than +root+ and +hacluster+. The following commands
+manage ACLs:
+
+- `user`
+- `role`
+
+In Pacemaker 1.1.12 and up, this command replaces the `user` command
+for handling ACLs:
+
+- `acl_target`
+
+The changes are applied to the current CIB only on ending the
+configuration session or using the `commit` command.
+
+Comment lines start with +#+. The comments are tied
+to the element which follows. If the element moves, its comments
+will follow.
+
+[[cmdhelp_configure_acl_target,Define target access rights]]
+==== `acl_target`
+
+Defines an ACL target.
+
+Usage:
+................
+acl_target <tid> [<role> ...]
+................
+Example:
+................
+acl_target joe resource_admin constraint_editor
+................
+
+[[cmdhelp_configure_alert,Event-driven alerts]]
+==== `alert`
+
+.Version note
+****************************
+This feature is only available
+in Pacemaker 1.1.15+.
+****************************
+
+Event-driven alerts enable calling scripts whenever interesting
+events occur in the cluster (nodes joining or leaving, resources
+starting or stopping, etc.).
+
+The +path+ is an arbitrary file path to an alert script. Existing
+external scripts used with ClusterMon resources can be used as alert
+scripts, since the interface is compatible.
+
+Each alert may have a number of recipients configured. These will be
+passed to the script as arguments. The first recipient will also be
+passed as the +CRM_alert_recipient+ environment variable, for
+compatibility with existing scripts that only support one recipient.
+
+The available meta attributes are +timeout+ (default 30s) and
++timestamp-format+ (default `"%H:%M:%S.%06N"`).
+
+Some configurations may require each recipient to be delimited by
+brackets, to avoid ambiguity. In the example +alert-2+ below, the meta
+attribute for `timeout` is defined after the recipient, so the
+brackets are used to ensure that the meta attribute is set for the
+alert and not just the recipient. This can be avoided by setting any
+alert attributes before defining the recipients.
+
+Usage:
+...............
+alert <id> <path> \
+ [attributes <nvpair> ...] \
+ [meta <nvpair> ...] \
+ [to [{] <recipient>
+ [attributes <nvpair> ...] \
+ [meta <nvpair> ...] [}] \
+ ...]
+...............
+
+Example:
+...............
+alert alert-1 /srv/pacemaker/pcmk_alert_sample.sh \
+ to /var/log/cluster-alerts.log
+
+alert alert-2 /srv/pacemaker/example_alert.sh \
+ meta timeout=60s \
+ to { /var/log/cluster-alerts.log }
+...............
+
+[[cmdhelp_configure_cib,CIB shadow management]]
+==== `cib`
+
+This level is for management of shadow CIBs. It is available at
+the `configure` level to enable saving intermediate changes to a
+shadow CIB instead of to the live cluster. This short excerpt
+shows how:
+...............
+crm(live)configure# cib new test-2
+INFO: test-2 shadow CIB created
+crm(test-2)configure# commit
+...............
+Note how the current CIB in the prompt changed from +live+ to
++test-2+ after issuing the `cib new` command. See also the
+<<cmdhelp_cib,CIB shadow management>> for more information.
+
+[[cmdhelp_configure_cibstatus,CIB status management and editing]]
+==== `cibstatus`
+
+Enter the CIB status section level, where the status section can be
+edited and managed. See the
+<<cmdhelp_cibstatus,CIB status management section>>.
+
+[[cmdhelp_configure_clone,define a clone]]
+==== `clone`
+
+The `clone` command creates a resource clone. It may contain a
+single primitive resource or one group of resources.
+
+Usage:
+...............
+clone <name> <rsc>
+ [description=<description>]
+ [meta <attr_list>]
+ [params <attr_list>]
+
+attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+...............
+Example:
+...............
+clone cl_fence apc_1 \
+ meta clone-node-max=1 globally-unique=false
+...............
+
+[[cmdhelp_configure_colocation,colocate resources]]
+==== `colocation` (`collocation`)
+
+This constraint expresses the placement relation between two
+or more resources. If there are more than two resources, then the
+constraint is called a resource set.
+
+The score is used to indicate the priority of the constraint. A
+positive score indicates that the resources should run on the same
+node, while a negative score indicates that they should not run on
+the same node. Values of positive or negative +infinity+ indicate a
+mandatory constraint.
+
+In the two resource form, the cluster will place +<with-rsc>+ first,
+and then decide where to put the +<rsc>+ resource.
+
+Collocation resource sets have an extra attribute (+sequential+)
+to allow for sets of resources which don't depend on each other
+in terms of state. The shell syntax for such sets is to put
+resources in parentheses.
+
+Sets cannot be nested.
+
+The optional +node-attribute+ can be used to colocate resources on a
+set of nodes and not necessarily on the same node. For example, by
+setting a node attribute +color+ on all nodes and setting the
++node-attribute+ value to +color+ as well, the colocated resources
+will be placed on any node that has the same color.
+
+For more details on how to configure resource sets, see
+<<topics_Features_Resourcesets,`Syntax: Resource sets`>>.
+
+Usage:
+...............
+colocation <id> <score>: <rsc>[:<role>] <with-rsc>[:<role>]
+ [node-attribute=<node_attr>]
+
+colocation <id> <score>: <resource_sets>
+ [node-attribute=<node_attr>]
+
+resource_sets :: <resource_set> [<resource_set> ...]
+
+resource_set :: ["("|"["] <rsc>[:<role>] [<rsc>[:<role>] ...] \
+ [<attributes>] [")"|"]"]
+
+attributes :: [require-all=(true|false)] [sequential=(true|false)]
+
+...............
+Example:
+...............
+colocation never_put_apache_with_dummy -inf: apache dummy
+colocation c1 inf: A ( B C )
+...............
+
+[[cmdhelp_configure_commit,commit the changes to the CIB]]
+==== `commit`
+
+Commit the current configuration to the CIB in use. As noted
+elsewhere, commands in a configure session don't have immediate
+effect on the CIB. All changes are applied at one point in time,
+either using `commit` or when the user leaves the configure
+level. In case the CIB in use changed in the meantime, presumably
+by somebody else, the crm shell will refuse to apply the changes.
+
+If you know that it's fine to still apply them, add +force+ to the
+command line.
+
+To disable CIB patching and apply the changes by replacing the CIB
+completely, add +replace+ to the command line. Note that this can lead
+to previous changes being overwritten if some other process
+concurrently modifies the CIB.
+
+Usage:
+...............
+commit [force] [replace]
+...............
+
+[[cmdhelp_configure_default-timeouts,set timeouts for operations to minimums from the meta-data]]
+==== `default-timeouts`
+
+This command takes the timeouts from the actions section of the
+resource agent meta-data and sets them for the operations of the
+primitive.
+
+Usage:
+...............
+default-timeouts <id> [<id>...]
+...............
+
+.Note on `default-timeouts`
+****************************
+The use of this command is discouraged in favor of manually
+determining the best timeouts required for the particular
+configuration. Relying on the resource agent to supply appropriate
+timeouts can cause the resource to fail at the worst possible moment.
+
+Appropriate timeouts for resource actions are context-sensitive, and
+should be carefully considered with the whole configuration in mind.
+****************************
+
+[[cmdhelp_configure_delete,delete CIB objects]]
+==== `delete`
+
+Delete one or more objects. If an object to be deleted belongs to
+a container object, such as a group, and it is the only resource
+in that container, then the container is deleted as well, along
+with any related constraints.
+
+If the object is a started resource, it will not be deleted unless the
++--force+ flag is passed to the command, or the +force+ option is set.
+
+Usage:
+...............
+delete [--force] <id> [<id>...]
+...............
+
+[[cmdhelp_configure_edit,edit CIB objects]]
+==== `edit`
+
+This command invokes the editor with the object description. As
+with the `show` command, the user may choose to edit all objects
+or a set of objects.
+
+If the user insists, he or she may edit the XML representation of the
+object. If you do that, don't modify any id attributes.
+
+Usage:
+...............
+edit [xml] [<id> ...]
+edit [xml] changed
+...............
+
+.Note on renaming element ids
+****************************
+The edit command sometimes cannot properly handle modifying
+element ids, in particular for elements which belong to group or
+ms resources. Group and ms resources themselves also cannot be
+renamed. Please use the `rename` command instead.
+****************************
+
+[[cmdhelp_configure_erase,erase the CIB]]
+==== `erase`
+
+The `erase` command clears all configuration apart from nodes. To
+remove nodes as well, you have to specify the additional keyword `nodes`.
+
+Note that removing nodes from the live cluster may have some
+strange/interesting/unwelcome effects.
+
+Usage:
+...............
+erase [nodes]
+...............
+
+[[cmdhelp_configure_fencing_topology,node fencing order]]
+==== `fencing_topology`
+
+If multiple fencing (stonith) devices capable of fencing a node
+are available, the order in which they are tried may be specified
+by +fencing_topology+. The order is specified per node.
+
+Stonith resources can be separated by +,+, in which case all of
+them need to succeed. If they fail, the next stonith resource (or
+set of resources) is tried. In other words, use a comma to
+separate resources which all need to succeed, and whitespace for
+serial order. Whitespace is not allowed around the comma.
+
+If the node is left out, the order applies to all nodes,
+which can reduce the configuration size in some stonith setups.
+
+From Pacemaker version 1.1.14, it is possible to use a node attribute
+as the +target+ in a fencing topology. The syntax for this usage is
+described below.
+
+From Pacemaker version 1.1.14, it is also possible to use regular
+expression patterns as the +target+ in a fencing topology. The configured
+fencing sequence then applies to all nodes whose names match the pattern.
+
+Usage:
+...............
+fencing_topology <stonith_resources> [<stonith_resources> ...]
+fencing_topology <fencing_order> [<fencing_order> ...]
+
+fencing_order :: <target> <stonith_resources> [<stonith_resources> ...]
+
+stonith_resources :: <rsc>[,<rsc>...]
+target :: <node>: | attr:<node-attribute>=<value> | pattern:<pattern>
+...............
+Example:
+...............
+# Only kill the power if poison-pill fails
+fencing_topology poison-pill power
+
+# As above for node-a, but a different strategy for node-b
+fencing_topology \
+ node-a: poison-pill power \
+ node-b: ipmi serial
+
+# Fencing anything on rack 1 requires fencing via both APC 1 and 2,
+# to defeat the redundancy provided by two separate UPS units.
+fencing_topology attr:rack=1 apc01,apc02
+
+# Fencing for all machines named green.* is done using the pear
+# fencing device first, while all machines named red.* are fenced
+# using the apple fencing device first.
+fencing_topology \
+ pattern:green.* pear apple \
+ pattern:red.* apple pear
+...............
+
+[[cmdhelp_configure_filter,filter CIB objects]]
+==== `filter`
+
+This command filters the given CIB elements through an external
+program. The program should accept input on `stdin` and send
+output to `stdout` (the standard UNIX filter conventions). As
+with the `show` command, the user may choose to filter all or
+just a subset of elements.
+
+It is possible to filter the XML representation of objects, but
+this is probably not as useful as filtering the configuration
+language. The presentation is somewhat different from what would
+be displayed by the `show` command---each element is shown on a
+single line, i.e. there are no backslashes and no other
+embellishments.
+
+Don't forget to put quotes around the filter if it contains
+spaces.
+
+Usage:
+...............
+filter <prog> [xml] [<id> ...]
+filter <prog> [xml] changed
+...............
+Examples:
+...............
+filter "sed '/^primitive/s/target-role=[^ ]*//'"
+# crm configure filter "sed '/^primitive/s/target-role=[^ ]*//'"
+crm configure <<END
+ filter "sed '/threshold=\"1\"/s/=\"1\"/=\"0\"/g'"
+END
+...............
+
+.Note on quotation marks
+**************************
+Filter commands which feature a blend of quotation marks can be
+difficult to get right, especially when used directly from bash, since
+bash does its own quotation parsing. In these cases, it can be easier
+to supply the filter command as standard input. See the last example
+above.
+**************************
+
+[[cmdhelp_configure_get_property,Get property value]]
+==== `get-property`
+
+Show the value of the given property. If the value is not set, the
+command will print the default value for the property, if known.
+
+If no property name is passed to the command, the list of known
+cluster properties is printed.
+
+If the property is set multiple times, for example using multiple
+property sets with different rule expressions, the output of this
+command is undefined.
+
+Pass the argument +-t+ or +--true+ to `get-property` to translate
+the property value into +true+ or +false+. If the value is not
+set, the command will print +false+.
+
+Usage:
+...............
+get-property [-t|--true] [<name>]
+...............
+
+Example:
+...............
+get-property stonith-enabled
+get-property -t maintenance-mode
+...............
+
+[[cmdhelp_configure_graph,generate a directed graph]]
+==== `graph`
+
+Create a graphviz graphical layout from the current cluster
+configuration.
+
+Currently, only `dot` (directed graph) is supported. It is
+essentially a visualization of resource ordering.
+
+The graph may be saved to a file which can be used as source for
+various graphviz tools (by default it is displayed in the user's
+X11 session). Optionally, by specifying the format, one can also
+produce an image instead.
+
+For more or different graphviz attributes, it is possible to save
+the default set of attributes to an ini file. If this file exists,
+it will always override the built-in settings. The +exportsettings+
+subcommand also prints the location of the ini file.
+
+Usage:
+...............
+graph [<gtype> [<file> [<img_format>]]]
+graph exportsettings
+
+gtype :: dot
+img_format :: `dot` output format (see the +-T+ option)
+...............
+Example:
+...............
+graph dot
+graph dot clu1.conf.dot
+graph dot clu1.conf.svg svg
+...............
+
+[[cmdhelp_configure_group,define a group]]
+==== `group`
+
+The `group` command creates a group of resources. This can be useful
+when resources depend on other resources and require that those
+resources start in order on the same node. A common use of resource
+groups is to ensure that a server and a virtual IP are located
+together, and that the virtual IP is started before the server.
+
+Grouped resources are started in the order they appear in the group,
+and stopped in the reverse order. If a resource in the group cannot
+run anywhere, resources following it in the group will not start.
+
+`group` can be passed the "container" meta attribute, to indicate that
+it is to be used to group VM resources monitored using Nagios. The
+resource referred to by the container attribute must be of type
+`ocf:heartbeat:Xen`, `ocf:heartbeat:VirtualDomain` or `ocf:heartbeat:lxc`.
+
+Usage:
+...............
+group <name> <rsc> [<rsc>...]
+ [description=<description>]
+ [meta attr_list]
+ [params attr_list]
+
+attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+...............
+Example:
+...............
+group internal_www disk0 fs0 internal_ip apache \
+ meta target_role=stopped
+
+group vm-and-services vm vm-sshd meta container="vm"
+...............
+
+[[cmdhelp_configure_load,import the CIB from a file]]
+==== `load`
+
+Load a part of the configuration (or all of it) from a local file or
+a network URL. The +replace+ method replaces the current
+configuration with the one from the source. The +update+ method
+tries to import the contents into the current configuration. The
++push+ method imports the contents into the current configuration
+and removes any lines that are not present in the given
+configuration.
+The file may be a CLI file or an XML file.
+
+If the URL is `-`, the configuration is read from standard input.
+
+Usage:
+...............
+load [xml] <method> URL
+
+method :: replace | update | push
+...............
+Example:
+...............
+load xml update myfirstcib.xml
+load xml replace http://storage.big.com/cibs/bigcib.xml
+load xml push smallcib.xml
+...............
+
+[[cmdhelp_configure_location,a location preference]]
+==== `location`
+
+`location` defines the preference of nodes for the given
+resource. The location constraints consist of one or more rules
+which specify a score to be awarded if the rule matches.
+
+The resource referenced by the location constraint can be one of the
+following:
+
+* Plain resource reference: +location loc1 webserver 100: node1+
+* Resource set in curly brackets: +location loc1 { virtual-ip webserver } 100: node1+
+* Tag containing resource ids: +location loc1 tag1 100: node1+
+* Resource pattern: +location loc1 /web.*/ 100: node1+
+
+The +resource-discovery+ attribute allows probes to be selectively
+enabled or disabled per resource and node.
+
+The syntax for resource sets is described in detail for
+<<cmdhelp_configure_colocation,`colocation`>>.
+
+For more details on how to configure resource sets, see
+<<topics_Features_Resourcesets,`Syntax: Resource sets`>>.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+location <id> <rsc> [<attributes>] {<node_pref>|<rules>}
+
+rsc :: /<rsc-pattern>/
+ | { resource_sets }
+ | <rsc>
+
+attributes :: role=<role> | resource-discovery=always|never|exclusive
+
+node_pref :: <score>: <node>
+
+rules ::
+ rule [id_spec] [$role=<role>] <score>: <expression>
+ [rule [id_spec] [$role=<role>] <score>: <expression> ...]
+
+id_spec :: $id=<id> | $id-ref=<id>
+score :: <number> | <attribute> | [-]inf
+expression :: <simple_exp> [<bool_op> <simple_exp> ...]
+bool_op :: or | and
+simple_exp :: <attribute> [type:]<binary_op> <value>
+ | <unary_op> <attribute>
+ | date <date_expr>
+type :: string | version | number
+binary_op :: lt | gt | lte | gte | eq | ne
+unary_op :: defined | not_defined
+
+date_expr :: lt <end>
+ | gt <start>
+ | in start=<start> end=<end>
+ | in start=<start> <duration>
+ | spec <date_spec>
+duration|date_spec ::
+ hours=<value>
+ | monthdays=<value>
+ | weekdays=<value>
+ | yeardays=<value>
+ | months=<value>
+ | weeks=<value>
+ | years=<value>
+ | weekyears=<value>
+ | moon=<value>
+...............
+Examples:
+...............
+location conn_1 internal_www 100: node1
+
+location conn_1 internal_www \
+ rule 50: #uname eq node1 \
+ rule pingd: defined pingd
+
+location conn_2 dummy_float \
+ rule -inf: not_defined pingd or pingd number:lte 0
+
+# never probe for rsc1 on node1
+location no-probe rsc1 resource-discovery=never -inf: node1
+...............
+
+[[cmdhelp_configure_modgroup,modify group]]
+==== `modgroup`
+
+Add or remove primitives in a group. The `add` subcommand appends
+the new group member by default. Should it go elsewhere, there
+are `after` and `before` clauses.
+
+Usage:
+...............
+modgroup <id> add <id> [after <id>|before <id>]
+modgroup <id> remove <id>
+...............
+Examples:
+...............
+modgroup share1 add storage2 before share1-fs
+...............
+
+[[cmdhelp_configure_monitor,add monitor operation to a primitive]]
+==== `monitor`
+
+Monitor is by far the most common operation. It is possible to
+add it without editing the whole resource, which also keeps long
+primitive definitions a bit less cluttered. In order to make this
+command as concise as possible, less common operation attributes
+are not available. If you need them, then use the `op` part of
+the `primitive` command.
+
+Usage:
+...............
+monitor <rsc>[:<role>] <interval>[:<timeout>]
+...............
+Example:
+...............
+monitor apcfence 60m:60s
+...............
+
+Note that after executing the command, the monitor operation may
+be shown as part of the primitive definition.
+
+[[cmdhelp_configure_ms,define a master-slave resource]]
+==== `ms` (`master`)
+
+The `ms` command creates a master/slave resource type. It may contain a
+single primitive resource or one group of resources.
+
+Usage:
+...............
+ms <name> <rsc>
+ [description=<description>]
+ [meta attr_list]
+ [params attr_list]
+
+attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+...............
+Example:
+...............
+ms disk1 drbd1 \
+ meta notify=true globally-unique=false
+...............
+
+.Note on `id-ref` usage
+****************************
+Instance or meta attributes (`params` and `meta`) may contain
+a reference to another set of attributes. In that case, no other
+attributes are allowed. Attribute set ids, though they do exist,
+are not shown by `crm`, so it is also possible to
+reference an object instead of an attribute set. `crm` will
+automatically replace such a reference with the right id:
+
+...............
+crm(live)configure# primitive a2 www-2 meta $id-ref=a1
+crm(live)configure# show a2
+primitive a2 apache \
+ meta $id-ref=a1-meta_attributes
+ [...]
+...............
+It is advisable to give meaningful names to attribute sets which
+are going to be referenced.
+****************************
+
+[[cmdhelp_configure_node,define a cluster node]]
+==== `node`
+
+The node command describes a cluster node. Nodes in the CIB are
+commonly created automatically by the CRM. Hence, you should not
+need to deal with nodes unless you also want to define node
+attributes. Note that it is also possible to manage node
+attributes at the `node` level.
+
+Usage:
+...............
+node [$id=<id>] <uname>[:<type>]
+ [description=<description>]
+ [attributes [$id=<id>] [<score>:] [rule...]
+ <param>=<value> [<param>=<value>...]] | $id-ref=<ref>
+ [utilization [$id=<id>] [<score>:] [rule...]
+ <param>=<value> [<param>=<value>...]] | $id-ref=<ref>
+
+type :: normal | member | ping | remote
+...............
+Example:
+...............
+node node1
+node big_node attributes memory=64
+...............
+
+[[cmdhelp_configure_op_defaults,set resource operations defaults]]
+==== `op_defaults`
+
+Set defaults for the operations meta attributes.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+op_defaults [$id=<set_id>] [rule ...] <option>=<value> [<option>=<value> ...]
+...............
+Example:
+...............
+op_defaults record-pending=true
+...............
+
+[[cmdhelp_configure_order,order resources]]
+==== `order`
+
+This constraint expresses the order of actions on two or more
+resources. If there are more than two resources, then the
+constraint is called a resource set.
+
+Ordered resource sets have an extra attribute to allow for sets
+of resources whose actions may run in parallel. The shell syntax
+for such sets is to put resources in parentheses.
+
+If the subsequent resource can start or promote after any one of the
+resources in a set has done so, enclose the set in brackets (+[+ and +]+).
+
+Sets cannot be nested.
+
+Three strings are reserved to specify a kind of order constraint:
++Mandatory+, +Optional+, and +Serialize+. It is preferred to use
+one of these settings instead of score. Previous versions mapped
+scores +0+ and +inf+ to keywords +advisory+ and +mandatory+.
+That is still valid but deprecated.
+
+For more details on how to configure resource sets, see
+<<topics_Features_Resourcesets,`Syntax: Resource sets`>>.
+
+Usage:
+...............
+order <id> [{kind|<score>}:] first then [symmetrical=<bool>]
+
+order <id> [{kind|<score>}:] resource_sets [symmetrical=<bool>]
+
+kind :: Mandatory | Optional | Serialize
+
+first :: <rsc>[:<action>]
+
+then :: <rsc>[:<action>]
+
+resource_sets :: resource_set [resource_set ...]
+
+resource_set :: ["["|"("] <rsc>[:<action>] [<rsc>[:<action>] ...] \
+ [attributes] ["]"|")"]
+
+attributes :: [require-all=(true|false)] [sequential=(true|false)]
+
+...............
+Example:
+...............
+order o-1 Mandatory: apache:start ip_1
+order o-2 Serialize: A ( B C )
+order o-3 inf: [ A B ] C
+order o-4 first-resource then-resource
+...............
+
+[[cmdhelp_configure_primitive,define a resource]]
+==== `primitive`
+
+The primitive command describes a resource. It may be referenced
+only once in group, clone, or master-slave objects. If it's not
+referenced, then it is placed as a single resource in the CIB.
+
+Operations may be specified anonymously, as a group or by reference:
+
+* "Anonymous", as a list of +op+ specifications. Use this
+ method if you don't need to reference the set of operations
+ elsewhere. This is the most common way to define operations.
+
+* If reusing operation sets is desired, use the +operations+ keyword
+ along with an id to give the operations set a name. Use the
+ +operations+ keyword and an id-ref value set to the id of another
+ operations set, to apply the same set of operations to this
+ primitive.
+
+Operation attributes which are not recognized are saved as
+instance attributes of that operation. A typical example is
++OCF_CHECK_LEVEL+.
+
+For multistate resources, roles are specified as +role=<role>+.
+
+A template may be defined for resources which are of the same
+type and which share most of the configuration. See
+<<cmdhelp_configure_rsc_template,`rsc_template`>> for more information.
+
+Attributes containing time values, such as the +interval+ attribute on
+operations, are configured either as a plain number, which is
+interpreted as a time in seconds, or using one of the following
+suffixes:
+
+* +s+, +sec+ - time in seconds (same as no suffix)
+* +ms+, +msec+ - time in milliseconds
+* +us+, +usec+ - time in microseconds
+* +m+, +min+ - time in minutes
+* +h+, +hr+ - time in hours
+
+Usage:
+...............
+primitive <rsc> {[<class>:[<provider>:]]<type>|@<template>}
+ [description=<description>]
+ [[params] attr_list]
+ [meta attr_list]
+ [utilization attr_list]
+ [operations id_spec]
+ [op op_type [<attribute>=<value>...] ...]
+
+attr_list :: [$id=<id>] [<score>:] [rule...]
+ <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+id_spec :: $id=<id> | $id-ref=<id>
+op_type :: start | stop | monitor
+...............
+Example:
+...............
+primitive apcfence stonith:apcsmart \
+ params ttydev=/dev/ttyS0 hostlist="node1 node2" \
+ op start timeout=60s \
+ op monitor interval=30m timeout=60s
+
+primitive www8 apache \
+ configfile=/etc/apache/www8.conf \
+ operations $id-ref=apache_ops
+
+primitive db0 mysql \
+ params config=/etc/mysql/db0.conf \
+ op monitor interval=60s \
+ op monitor interval=300s OCF_CHECK_LEVEL=10
+
+primitive r0 ocf:linbit:drbd \
+ params drbd_resource=r0 \
+ op monitor role=Master interval=60s \
+ op monitor role=Slave interval=300s
+
+primitive xen0 @vm_scheme1 xmfile=/etc/xen/vm/xen0
+
+primitive mySpecialRsc Special \
+ params 3: rule #uname eq node1 interface=eth1 \
+ params 2: rule #uname eq node2 interface=eth2 port=8888 \
+ params 1: interface=eth0 port=9999
+
+...............
+
+[[cmdhelp_configure_property,set a cluster property]]
+==== `property`
+
+Set cluster configuration properties. To list the
+available cluster configuration properties, use the
+<<cmdhelp_ra_info,`ra info`>> command with +pengine+, +crmd+,
++cib+ and +stonithd+ as arguments.
+When setting the +maintenance-mode+ property, the command will
+inform the user if there are nodes or resources that
+have the +maintenance+ property set.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+property [<set_id>:] [rule ...] <option>=<value> [<option>=<value> ...]
+...............
+Example:
+...............
+property stonith-enabled=true
+property rule date spec years=2014 stonith-enabled=false
+...............
+
+[[cmdhelp_configure_ptest,show cluster actions if changes were committed]]
+==== `ptest` (`simulate`)
+
+Show PE (Policy Engine) motions using `ptest(8)` or
+`crm_simulate(8)`.
+
+A CIB is constructed using the current user edited configuration
+and the status from the running CIB. The resulting CIB is run
+through `ptest` (or `crm_simulate`) to show changes which would
+happen if the configuration is committed.
+
+The status section may be loaded from another source and modified
+using the <<cmdhelp_cibstatus,`cibstatus`>> level commands. In that case, the
+`ptest` command will issue a message informing the user that the
+Policy Engine graph is not calculated based on the current status
+section, and will therefore show what would happen to an imaginary
+cluster rather than to the running one.
+
+If you have graphviz installed and an X11 session, `dotty(1)` is run
+to display the changes graphically.
+
+Add a string of +v+ characters to increase verbosity. Specify
++scores+ to also show allocation scores. +utilization+ turns on
+information about the remaining capacity of nodes. With the
++actions+ option, `ptest` will print all resource actions.
+
+The `ptest` program has been replaced by `crm_simulate` in newer
+Pacemaker versions. In some installations both may be
+installed. Use `simulate` to enforce using `crm_simulate`.
+
+Usage:
+...............
+ptest [nograph] [v...] [scores] [actions] [utilization]
+...............
+Examples:
+...............
+ptest scores
+ptest vvvvv
+simulate actions
+...............
+
+[[cmdhelp_configure_refresh,refresh from CIB]]
+==== `refresh`
+
+Refresh the internal structures from the CIB. All changes made
+during this session are lost.
+
+Usage:
+...............
+refresh
+...............
+
+[[cmdhelp_configure_rename,rename a CIB object]]
+==== `rename`
+
+Rename an object. It is recommended to use this command to rename
+a resource, because it will take care of updating all related
+constraints and the parent resource. Changing ids with the `edit`
+command won't have the same effect.
+
+If you want to rename a resource, it must be in the stopped state.
+
+Usage:
+...............
+rename <old_id> <new_id>
+...............
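+
+For example, to rename a stopped resource with the hypothetical id
+`vip_old`:
+...............
+rename vip_old vip_new
+...............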
+
+[[cmdhelp_configure_role,define role access rights]]
+==== `role`
+
+An ACL role is a set of rules which describe access rights to the
+CIB. Rules consist of an access right (+read+, +write+, or +deny+)
+and a specification denoting part of the configuration to which
+the access right applies. The specification can be an XPath or a
+combination of tag and id references. If an attribute is
+appended, then the specification applies only to that attribute
+of the matching element.
+
+There are a number of shortcuts for XPath specifications. The
++meta+, +params+, and +utilization+ shortcuts reference resource
+meta attributes, parameters, and utilization respectively. The
++location+ shortcut references the location constraints of a
+resource, and is most often used to allow the resource `move` and
+`unmove` commands. The +property+ shortcut references cluster
+properties. The +node+ shortcut allows
+reading node attributes. +nodeattr+ and +nodeutil+ reference node
+attributes and node capacity (utilization). The +status+ shortcut
+references the whole status section of the CIB. Read access to
+status is necessary for various monitoring tools such as
+`crm_mon(8)` (aka `crm status`).
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+role <role-id> rule [rule ...]
+
+rule :: acl-right cib-spec [attribute:<attribute>]
+
+acl-right :: read | write | deny
+
+cib-spec :: xpath-spec | tag-ref-spec
+xpath-spec :: xpath:<xpath> | shortcut
+tag-ref-spec :: tag:<tag> | ref:<id> | tag:<tag> ref:<id>
+
+shortcut :: meta:<rsc>[:<attr>]
+ params:<rsc>[:<attr>]
+ utilization:<rsc>
+ location:<rsc>
+ property[:<attr>]
+ node[:<node>]
+ nodeattr[:<attr>]
+ nodeutil[:<node>]
+ status
+...............
+Example:
+...............
+role app1_admin \
+ write meta:app1:target-role \
+ write meta:app1:is-managed \
+ write location:app1 \
+ read ref:app1
+...............
+
+[[cmdhelp_configure_rsc_defaults,set resource defaults]]
+==== `rsc_defaults`
+
+Set defaults for the resource meta attributes.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+rsc_defaults [<set_id>:] [rule ...] <option>=<value> [<option>=<value> ...]
+...............
+Example:
+...............
+rsc_defaults failure-timeout=3m
+...............
+
+[[cmdhelp_configure_rsc_template,define a resource template]]
+==== `rsc_template`
+
+The `rsc_template` command creates a resource template. It may be
+referenced in primitives. It is used to reduce large
+configurations with many similar resources.
+
+Usage:
+...............
+rsc_template <name> [<class>:[<provider>:]]<type>
+ [description=<description>]
+ [params attr_list]
+ [meta attr_list]
+ [utilization attr_list]
+ [operations id_spec]
+ [op op_type [<attribute>=<value>...] ...]
+
+attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+id_spec :: $id=<id> | $id-ref=<id>
+op_type :: start | stop | monitor
+...............
+Example:
+...............
+rsc_template public_vm Xen \
+ op start timeout=300s \
+ op stop timeout=300s \
+ op monitor interval=30s timeout=60s \
+ op migrate_from timeout=600s \
+ op migrate_to timeout=600s
+primitive xen0 @public_vm \
+ params xmfile=/etc/xen/xen0
+primitive xen1 @public_vm \
+ params xmfile=/etc/xen/xen1
+...............
+
+[[cmdhelp_configure_rsc_ticket,resources ticket dependency]]
+==== `rsc_ticket`
+
+This constraint expresses dependency of resources on cluster-wide
+attributes, also known as tickets. Tickets are mainly used in
+geo-clusters, which consist of multiple sites. A ticket may be
+granted to a site, thus allowing resources to run there.
+
+The +loss-policy+ attribute specifies what happens to the
+resource (or resources) if the ticket is revoked. The default is
+either +stop+ or +demote+ depending on whether a resource is
+multi-state.
+
+See also the <<cmdhelp_site_ticket,`site`>> set of commands.
+
+Usage:
+...............
+rsc_ticket <id> <ticket_id>: <rsc>[:<role>] [<rsc>[:<role>] ...]
+ [loss-policy=<loss_policy_action>]
+
+loss_policy_action :: stop | demote | fence | freeze
+...............
+Example:
+...............
+rsc_ticket ticket-A_public-ip ticket-A: public-ip
+rsc_ticket ticket-A_bigdb ticket-A: bigdb loss-policy=fence
+rsc_ticket ticket-B_storage ticket-B: drbd-a:Master drbd-b:Master
+...............
+
+
+[[cmdhelp_configure_rsctest,test resources as currently configured]]
+==== `rsctest`
+
+Test resources with current resource configuration. If no nodes
+are specified, tests are run on all known nodes.
+
+The order of resources is significant: it is assumed that later
+resources depend on earlier ones.
+
+If a resource is multi-state, it is assumed that the role on
+which later resources depend is master.
+
+Tests are run sequentially to prevent running the same resource
+on two or more nodes. Tests are carried out only if none of the
+specified nodes currently run any of the specified resources.
+However, it won't verify whether resources run on the other
+nodes.
+
+Superuser privileges are obviously required: either run this as
+root or set up the `sudoers` file appropriately.
+
+Note that resource testing may take some time.
+
+Usage:
+...............
+rsctest <rsc_id> [<rsc_id> ...] [<node_id> ...]
+...............
+Examples:
+...............
+rsctest my_ip websvc
+rsctest websvc nodeB
+...............
+
+[[cmdhelp_configure_save,save the CIB to a file]]
+==== `save`
+
+Save the current configuration to a file. Optionally, as XML. Use
++-+ instead of file name to write the output to `stdout`.
+
+The `save` command accepts the same selection arguments as the `show`
+command. See the <<cmdhelp_configure_show,help section>> for `show`
+for more details.
+
+Usage:
+...............
+save [xml] [<id> | type:<type> | tag:<tag> |
+ related:<obj> | changed ...] <file>
+...............
+Example:
+...............
+save myfirstcib.txt
+save web-server server-config.txt
+...............
+
+[[cmdhelp_configure_schema,set or display current CIB RNG schema]]
+==== `schema`
+
+The CIB's content is validated against a RelaxNG (RNG) schema.
+Pacemaker supports several schema versions, depending on the
+release. At least the following schemas are accepted by `crmsh`:
+
+* +pacemaker-1.0+
+* +pacemaker-1.1+
+* +pacemaker-1.2+
+* +pacemaker-1.3+
+* +pacemaker-2.0+
+
+Use this command to display or switch to another RNG schema.
+
+Usage:
+...............
+schema [<schema>]
+...............
+Example:
+...............
+schema pacemaker-1.1
+...............
+
+[[cmdhelp_configure_set,set an attribute value]]
+==== `set`
+
+Set the value of a configured attribute. The attribute must
+have a value configured previously, and can be an agent
+parameter, meta attribute or utilization value.
+
+The first argument to the command is a path to an attribute.
+This is a dot-separated sequence beginning with the name of
+the resource, and ending with the name of the attribute to
+set.
+
+Usage:
+...............
+set <path> <value>
+...............
+Examples:
+...............
+set vip1.ip 192.168.20.5
+set vm-a.force_stop 1
+...............
+
+[[cmdhelp_configure_show,display CIB objects]]
+==== `show`
+
+The `show` command displays CIB objects. Without any argument, it
+displays all objects in the CIB, but the set of objects displayed by
+`show` can be limited to only objects with the given IDs or by using
+one or more of the special prefixes described below.
+
+The XML representation for the objects can be displayed by passing
++xml+ as the first argument.
+
+To show one or more specific objects, pass the object IDs as
+arguments.
+
+To show all objects of a certain type, use the +type:+ prefix.
+
+To show all objects in a tag, use the +tag:+ prefix.
+
+To show all constraints related to a primitive, use the +related:+ prefix.
+
+To show all modified objects, pass the argument +changed+.
+
+The prefixes can be used together on a single command line. For
+example, to show both the tag itself and the objects tagged by it the
+following combination can be used: +show tag:my-tag my-tag+.
+
+To refine a selection of objects using multiple modifiers, the keywords
++and+ and +or+ can be used. For example, to select all primitives tagged
++foo+, the following combination can be used:
++show type:primitive and tag:foo+.
+
+To hide values when displaying the configuration, use the
++obscure:<glob>+ argument. This can be useful when sending the
+configuration over a public channel, to avoid exposing potentially
+sensitive information. The +<glob>+ argument is a bash-style pattern
+matching attribute keys.
+
+Usage:
+...............
+show [xml] [<id>
+ | changed
+ | type:<type>
+ | tag:<id>
+ | related:<obj>
+ | obscure:<glob>
+ ...]
+
+type :: node | primitive | group | clone | ms | rsc_template
+ | location | colocation | order
+ | rsc_ticket
+ | property | rsc_defaults | op_defaults
+ | fencing_topology
+ | role | user | acl_target
+ | tag
+...............
+
+Example:
+...............
+show webapp
+show type:primitive
+show xml tag:db tag:fs
+show related:webapp
+show type:primitive obscure:passwd
+...............
+
+[[cmdhelp_configure_tag,Define resource tags]]
+==== `tag`
+
+Define a resource tag. A tag is an id referring to one or more
+resources, without implying any constraints between the tagged
+resources. This can be useful for grouping conceptually related
+resources.
+
+Usage:
+...............
+tag <tag-name>: <rsc> [<rsc> ...]
+tag <tag-name> <rsc> [<rsc> ...]
+...............
+Example:
+...............
+tag web: p-webserver p-vip
+tag ips server-vip admin-vip
+...............
+
+[[cmdhelp_configure_template,edit and import a configuration from a template]]
+==== `template`
+
+The specified template is loaded into the editor. It's up to the
+user to make a good CRM configuration out of it. See also the
+<<cmdhelp_template,template section>>.
+
+Usage:
+...............
+template [xml] url
+...............
+Example:
+...............
+template two-apaches.txt
+...............
+
+[[cmdhelp_configure_upgrade,upgrade the CIB]]
+==== `upgrade`
+
+Attempts to upgrade the CIB so that it validates with the current
+schema version. Commonly, this is required if the error
+`CIB not supported` occurs. It typically means that the
+active CIB version is coming from an older release.
+
+As a safety precaution, the force argument is required if the
++validate-with+ attribute is set to anything other than
++0.6+. Thus in most cases, it is required.
+
+Usage:
+...............
+upgrade [force]
+...............
+
+Example:
+...............
+upgrade force
+...............
+
+[[cmdhelp_configure_user,define user access rights]]
+==== `user`
+
+Users who normally cannot view or manage the cluster configuration
+can be allowed access to parts of the CIB. The access is defined
+by a set of +read+, +write+, and +deny+ rules as in role
+definitions or by referencing roles. The latter is considered
+best practice.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+user <uid> {roles|rules}
+
+roles :: role:<role-ref> [role:<role-ref> ...]
+rules :: rule [rule ...]
+...............
+Example:
+...............
+user joe \
+ role:app1_admin \
+ role:read_all
+...............
+
+[[cmdhelp_configure_validate_all,call agent validate-all for resource]]
+==== `validate-all`
+
+Call the `validate-all` action for the resource, if possible.
+
+Limitations:
+
+* The resource agent must implement the `validate-all` action.
+* The current user must be root.
+* The primitive resource must not use nvpair references.
+
+Usage:
+...............
+validate-all <rsc>
+...............
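+
+For example, for a primitive with the hypothetical id `vip1`:
+...............
+validate-all vip1
+...............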
+
+
+[[cmdhelp_configure_verify,verify the CIB with crm_verify]]
+==== `verify`
+
+Verify the contents of the CIB which would be committed.
+
+Usage:
+...............
+verify
+...............
+
+[[cmdhelp_configure_xml,raw xml]]
+==== `xml`
+
+Even though we promised no XML, it may happen, though hopefully
+very seldom, that an element from the CIB cannot be rendered
+in the configuration language. In that case, the element will be
+shown as raw XML, prefixed by this command. That element can then
+be edited like any other. If the shell finds that it can digest
+the element after the change, it will be converted back into
+the normal configuration language. Otherwise, there is no need to
+use `xml` for configuration.
+
+Usage:
+...............
+xml <xml>
+...............
+
+[[cmdhelp_template,edit and import a configuration from a template]]
+=== `template` - Import configuration from templates
+
+The user may be assisted in the cluster configuration by templates
+prepared in advance. Templates consist of a typical ready-made
+configuration which may be edited to suit particular user needs.
+
+This command enters a template level where additional commands
+for configuration/template management are available.
+
+[[cmdhelp_template_apply,process and apply the current configuration to the current CIB]]
+==== `apply`
+
+Copy the current or given configuration to the current CIB. By
+default, the CIB is replaced, unless the method is set to
+"update".
+
+Usage:
+...............
+apply [<method>] [<config>]
+
+method :: replace | update
+...............
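+
+For example, to replace the CIB with the currently loaded
+configuration, or to update it from a configuration named `vip`
+(as created in the `new` example below):
+...............
+apply
+apply update vip
+...............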
+
+[[cmdhelp_template_delete,delete a configuration]]
+==== `delete`
+
+Remove a configuration. The loaded (active) configuration may be
+removed by force.
+
+Usage:
+...............
+delete <config> [force]
+...............
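+
+For example, to remove a configuration named `vip` even if it is
+currently loaded:
+...............
+delete vip force
+...............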
+
+[[cmdhelp_template_edit,edit a configuration]]
+==== `edit`
+
+Edit current or given configuration using your favourite editor.
+
+Usage:
+...............
+edit [<config>]
+...............
+
+[[cmdhelp_template_list,list configurations/templates]]
+==== `list`
+
+When called with no argument, lists existing templates and
+configurations.
+
+Given the argument +templates+, lists the available templates.
+
+Given the argument +configs+, lists the available configurations.
+
+Usage:
+...............
+list [templates|configs]
+...............
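+
+For example, to list only the available templates:
+...............
+list templates
+...............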
+
+[[cmdhelp_template_load,load a configuration]]
+==== `load`
+
+Load an existing configuration. Further `edit`, `show`, and
+`apply` commands will refer to this configuration.
+
+Usage:
+...............
+load <config>
+...............
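+
+For example, to make a previously created configuration named
+`vip` the current one:
+...............
+load vip
+...............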
+
+[[cmdhelp_template_new,create a new configuration from templates]]
+==== `new`
+
+Create a new configuration from one or more templates. Note that
+configurations and templates are kept in different places, so it
+is possible for a configuration name to equal a template name.
+
+If you already know which parameters are required, you can set
+them directly on the command line.
+
+The parameter name +id+ is set by default to the name of the
+configuration.
+
+If no parameters are being set and you don't want a particular name
+for your configuration, you can call this command with a template name
+as the only parameter. A unique configuration name based on the
+template name will be generated.
+
+Usage:
+...............
+new [<config>] <template> [<template> ...] [params name=value ...]
+...............
+
+Example:
+...............
+new vip virtual-ip
+new bigfs ocfs2 params device=/dev/sdx8 directory=/bigfs
+new apache
+...............
+
+[[cmdhelp_template_show,show the processed configuration]]
+==== `show`
+
+Process the current or given configuration and display the result.
+
+Usage:
+...............
+show [<config>]
+...............
+
+[[cmdhelp_cibstatus,CIB status management and editing]]
+=== `cibstatus` - CIB status management and editing
+
+The `status` section of the CIB keeps the current status of nodes
+and resources. It is modified _only_ on events, i.e. when some
+resource operation is run or node status changes. For obvious
+reasons, the CRM has no user interface with which it is possible
+to affect the status section. From the user's point of view, the
+status section is essentially a read-only part of the CIB. The
+current status is never even written to disk, though it is
+available in the PE (Policy Engine) input files which represent
+the history of cluster motions. The current status may be read
+using the +cibadmin -Q+ command.
+
+It may sometimes be of interest to see how status changes would
+affect the Policy Engine. The set of `cibstatus` level commands
+allow the user to load status sections from various sources and
+then insert or modify resource operations or change nodes' state.
+
+The effect of those changes may then be observed by running the
+<<cmdhelp_configure_ptest,`ptest`>> command at the `configure` level
+or the `simulate` and `run` commands at this level. `ptest`
+runs with the user-edited CIB, whereas the latter two commands
+run with the CIB which was loaded along with the status section.
+
+The `simulate` and `run` commands as well as all status
+modification commands are implemented using `crm_simulate(8)`.
+
+[[cmdhelp_cibstatus_load,load the CIB status section]]
+==== `load`
+
+Load a status section from a file, a shadow CIB, or the running
+cluster. By default, the current (+live+) status section is
+modified. Note that if the +live+ status section is modified it
+is not going to be updated if the cluster status changes, because
+that would overwrite the user changes. To make `crm` drop changes
+and resume use of the running cluster status, run +load live+.
+
+All CIB shadow configurations contain the status section which is
+a snapshot of the status section taken at the time the shadow was
+created. Obviously, this status section doesn't have much to do
+with the running cluster status, unless the shadow CIB has just
+been created. Therefore, the `ptest` command by default uses the
+running cluster status section.
+
+Usage:
+...............
+load {<file>|shadow:<cib>|live}
+...............
+Example:
+...............
+load bug-12299.xml
+load shadow:test1
+...............
+
+[[cmdhelp_cibstatus_node,change node status]]
+==== `node`
+
+Change the node status. It is possible to throw a node out of
+the cluster, make it a member, or set its state to unclean.
+
++online+:: Set the +node_state+ `crmd` attribute to +online+
+and the +expected+ and +join+ attributes to +member+. The effect
+is that the node becomes a cluster member.
+
++offline+:: Set the +node_state+ `crmd` attribute to +offline+
+and the +expected+ attribute to empty. The node is then seen as
+cleanly removed from the cluster.
+
++unclean+:: Set the +node_state+ `crmd` attribute to +offline+
+and the +expected+ attribute to +member+. In this case the node
+has unexpectedly disappeared.
+
+Usage:
+...............
+node <node> {online|offline|unclean}
+...............
+Example:
+...............
+node xen-b unclean
+...............
+
+[[cmdhelp_cibstatus_op,edit outcome of a resource operation]]
+==== `op`
+
+Edit the outcome of a resource operation. This way you can
+tell the CRM that it ran an operation and that the resource agent
+returned a certain exit code. It is also possible to change the
+operation's status. In case the operation status is set to
+something other than +done+, the exit code is effectively
+ignored.
+
+Usage:
+...............
+op <operation> <resource> <exit_code> [<op_status>] [<node>]
+
+operation :: probe | monitor[:<n>] | start | stop |
+ promote | demote | notify | migrate_to | migrate_from
+exit_code :: <rc> | success | generic | args |
+ unimplemented | perm | installed | configured | not_running |
+ master | failed_master
+op_status :: pending | done | cancelled | timeout | notsupported | error
+
+n :: the monitor interval in seconds; if omitted, the first
+ recurring operation is referenced
+rc :: numeric exit code in range 0..9
+...............
+Example:
+...............
+op start d1 xen-b generic
+op start d1 xen-b 1
+op monitor d1 xen-b not_running
+op stop d1 xen-b 0 timeout
+...............
+
+[[cmdhelp_cibstatus_origin,display origin of the CIB status section]]
+==== `origin`
+
+Show the origin of the status section currently in use. This
+essentially shows the latest `load` argument.
+
+Usage:
+...............
+origin
+...............
+
+[[cmdhelp_cibstatus_quorum,set the quorum]]
+==== `quorum`
+
+Set the quorum value.
+
+Usage:
+...............
+quorum <bool>
+...............
+Example:
+...............
+quorum false
+...............
+
+[[cmdhelp_cibstatus_run,run policy engine]]
+==== `run`
+
+Run the policy engine with the edited status section.
+
+Add a string of +v+ characters to increase verbosity. Specify
++scores+ to see allocation scores also. +utilization+ turns on
+information about the remaining capacity of nodes.
+
+If you have graphviz installed and an X11 session, `dotty(1)` is run
+to display the changes graphically.
+
+Usage:
+...............
+run [nograph] [v...] [scores] [utilization]
+...............
+Example:
+...............
+run
+...............
+
+[[cmdhelp_cibstatus_save,save the CIB status section]]
+==== `save`
+
+The current internal status section with whatever modifications
+were performed can be saved to a file or shadow CIB.
+
+If the file exists and contains a complete CIB, only the status
+section is going to be replaced and the rest of the CIB will
+remain intact. Otherwise, the current user edited configuration
+is saved along with the status section.
+
+Note that all modifications are saved in the source file as soon
+as they are run.
+
+Usage:
+...............
+save [<file>|shadow:<cib>]
+...............
+Example:
+...............
+save bug-12299.xml
+...............
+
+[[cmdhelp_cibstatus_show,show CIB status section]]
+==== `show`
+
+Show the current status section in the XML format. Brace yourself
+for some unreadable output. Add the +changed+ option to get a
+human-readable list of all changes.
+
+Usage:
+...............
+show [changed]
+...............
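+
+For example, to get a human-readable list of the changes made so
+far:
+...............
+show changed
+...............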
+
+[[cmdhelp_cibstatus_simulate,simulate cluster transition]]
+==== `simulate`
+
+Run the policy engine with the edited status section and simulate
+the transition.
+
+Add a string of +v+ characters to increase verbosity. Specify
++scores+ to see allocation scores also. +utilization+ turns on
+information about the remaining capacity of nodes.
+
+If you have graphviz installed and an X11 session, `dotty(1)` is run
+to display the changes graphically.
+
+Usage:
+...............
+simulate [nograph] [v...] [scores] [utilization]
+...............
+Example:
+...............
+simulate
+...............
+
+[[cmdhelp_cibstatus_ticket,manage tickets]]
+==== `ticket`
+
+Modify the ticket status. Tickets can be granted and revoked.
+Granted tickets could be activated or put in standby.
+
+Usage:
+...............
+ticket <ticket> {grant|revoke|activate|standby}
+...............
+Example:
+...............
+ticket ticketA grant
+...............
+
+[[cmdhelp_assist,Configuration assistant]]
+=== `assist` - Configuration assistant
+
+The `assist` sublevel is a collection of helper
+commands that create or modify resources and
+constraints, to simplify the creation of certain
+configurations.
+
+For more information on individual commands, see
+the help text for those commands.
+
+[[cmdhelp_assist_template,Create template for primitives]]
+==== `template`
+
+This command takes a list of primitives as argument, and creates a new
+`rsc_template` for these primitives. It can only do this if the
+primitives do not already share a template and are of the same type.
+
+Usage:
+........
+template primitive-1 primitive-2 primitive-3
+........
+
+[[cmdhelp_assist_weak-bond,Create a weak bond between resources]]
+==== `weak-bond`
+
+A colocation between a group of resources says that the resources
+should be located together, but it also means that those resources are
+dependent on each other. If one of the resources fails, the others
+will be restarted.
+
+If this is not desired, it is possible to circumvent this: by placing the
+resources in a non-sequential set and colocating the set with a dummy
+resource which is not monitored, the resources will be placed together
+but will have no further dependency on each other.
+
+This command creates both the constraint and the dummy resource needed
+for such a colocation.
+
+Usage:
+........
+weak-bond resource-1 resource-2
+........
+
+[[cmdhelp_maintenance,Maintenance mode commands]]
+=== `maintenance` - Maintenance mode commands
+
+Maintenance mode commands are commands that manipulate resources
+directly without going through the cluster infrastructure. Therefore,
+it is essential to ensure that the cluster does not attempt to monitor
+or manipulate the resources while these commands are being executed.
+
+To ensure this, these commands require that maintenance mode is set
+either for the particular resource, or for the whole cluster.
+
+[[cmdhelp_maintenance_action,Invoke a resource action]]
+==== `action`
+
+Invokes the given action for the resource. This is
+done directly via the resource agent, so the command must
+be issued while the cluster or the resource is in
+maintenance mode.
+
+Unless the action is `start` or `monitor`, the action must be invoked
+on the same node where the resource is running. If the resource is
+running on multiple nodes, the command will fail.
+
+To use SSH for executing resource actions on multiple nodes, append
+`ssh` after the action name. This requires SSH access to be configured
+between the nodes and the parallax python package to be installed.
+
+Usage:
+...............
+action <rsc> <action>
+action <rsc> <action> ssh
+...............
+Example:
+...............
+action webserver reload
+action webserver monitor ssh
+...............
+
+[[cmdhelp_maintenance_off,Disable maintenance mode]]
+==== `off`
+
+Disables maintenance mode, either for the whole cluster
+or for the given resource.
+
+Usage:
+...............
+off
+off <rsc>
+...............
+Example:
+...............
+off rsc1
+...............
+
+[[cmdhelp_maintenance_on,Enable maintenance mode]]
+==== `on`
+
+Enables maintenance mode, either for the whole cluster
+or for the given resource.
+
+Usage:
+...............
+on
+on <rsc>
+...............
+Example:
+...............
+on rsc1
+...............
+
+[[cmdhelp_history,Cluster history]]
+=== `history` - Cluster history
+
+Examining Pacemaker's history is a particularly involved task. The
+number of subsystems to be considered, the complexity of the
+configuration, and the set of various information sources, most of
+which are not exactly human readable, make analyzing resource or node
+problems accessible only to the most knowledgeable. Or, depending on
+the point of view, to the most persistent. The following set of
+commands has been devised in the hope of making cluster history more
+accessible.
+
+Of course, looking at _all_ history could be time consuming regardless
+of how good the tools at hand are. Therefore, one should first
+specify the period to analyze. If not otherwise specified,
+the last hour is considered. Logs and other relevant information are
+collected using `crm report`. Since this process takes some time and
+we always need fresh logs, information is refreshed in a much faster
+way using the python parallax module. If +python-parallax+ is not
+found on the system, examining a live cluster is still possible --
+though not as comfortable.
+
+Apart from examining a live cluster, events may be retrieved from a
+report generated by `crm report` (see also the +-H+ option). In that
+case we assume that the period spanning the whole report needs to be
+investigated. Of course, it is still possible to further reduce the
+time range.
+
+If you have discovered an issue that you want to show someone else,
+you can use the `session pack` command to save the current session as
+a tarball, similar to those generated by `crm report`.
+
+In order to minimize the size of the tarball, and to make it easier
+for others to find the interesting events, it is recommended to limit
+the time frame which the saved session covers. This can be done using
+the `timeframe` command (example below).
+
+It is also possible to name the saved session using the `session save`
+command.
+
+Example:
+...............
+crm(live)history# limit "Jul 18 12:00" "Jul 18 12:30"
+crm(live)history# session save strange_restart
+crm(live)history# session pack
+Report saved in .../strange_restart.tar.bz2
+crm(live)history#
+...............
+
+[[cmdhelp_history_detail,set the level of detail shown]]
+==== `detail`
+
+How much detail to show from the logs. Valid detail levels are either
+`0` or `1`, where `1` is the highest detail level. The default detail
+level is `0`.
+
+Usage:
+...............
+detail <detail_level>
+
+detail_level :: small integer (defaults to 0)
+...............
+Example:
+...............
+detail 1
+...............
+
+[[cmdhelp_history_diff,cluster states/transitions difference]]
+==== `diff`
+
+A transition represents a change in cluster configuration or
+state. Use `diff` to see what has changed between two
+transitions.
+
+If you want to specify the current cluster configuration and
+status, use the string +live+.
+
+Normally, the first transition specified should be the one which
+is older, but we are not going to enforce that.
+
+Note that a single configuration update may result in more than
+one transition.
+
+Usage:
+...............
+diff <pe> <pe> [status] [html]
+
+pe :: <number>|<index>|<file>|live
+...............
+Examples:
+...............
+diff 2066 2067
+diff pe-input-2080.bz2 live status
+...............
+
+[[cmdhelp_history_events,Show events in log]]
+==== `events`
+
+By analysing the log output and looking for particular
+patterns, the `events` command helps sift through
+the logs to find when particular events, such as resources
+changing state or node failures, may have occurred.
+
+This can be used to generate a combined list of events
+from all nodes.
+
+Usage:
+...............
+events
+...............
+
+Example:
+...............
+events
+...............
+
+[[cmdhelp_history_exclude,exclude log messages]]
+==== `exclude`
+
+If a log is infested with irrelevant messages, those messages may
+be excluded by specifying a regular expression. The regular
+expressions used are Python extended regular expressions. This
+command is additive. To drop all regular expressions, use
++exclude clear+. Run `exclude` without arguments to see the
+current list of regular expressions.
+Excludes are saved along with the history sessions.
+
+Usage:
+...............
+exclude [<regex>|clear]
+...............
+Example:
+...............
+exclude kernel.*ocfs2
+...............
+
+[[cmdhelp_history_graph,generate a directed graph from the PE file]]
+==== `graph`
+
+Create a graphviz graphical layout from the PE file (the
+transition). Every transition contains the cluster configuration
+which was active at the time. See also <<cmdhelp_configure_graph,generate a directed graph
+from configuration>>.
+
+Usage:
+...............
+graph <pe> [<gtype> [<file> [<img_format>]]]
+
+gtype :: dot
+img_format :: `dot` output format (see the +-T+ option)
+...............
+Example:
+...............
+graph -1
+graph 322 dot clu1.conf.dot
+graph 322 dot clu1.conf.svg svg
+...............
+
+[[cmdhelp_history_info,Cluster information summary]]
+==== `info`
+
+The `info` command provides a summary of the information source, which
+can be either a live cluster snapshot or a previously generated
+report.
+
+Usage:
+...............
+info
+...............
+Example:
+...............
+info
+...............
+
+[[cmdhelp_history_latest,show latest news from the cluster]]
+==== `latest`
+
+The `latest` command shows a bit of recent history, more
+precisely whatever happened since the last cluster change (the
+latest transition). If the transition is running, the shell will
+first wait until it finishes.
+
+Usage:
+...............
+latest
+...............
+Example:
+...............
+latest
+...............
+
+[[cmdhelp_history_limit,limit timeframe to be examined]]
+==== `limit` (`timeframe`)
+
+This command can be used to modify the time span to examine. All
+history commands look at events within a certain time span.
+
+For the `live` source, the default time span is the _last hour_.
+
+There is no time span limit for the `hb_report` source.
+
+The time period is parsed by the `dateutil` python module. It
+covers a wide range of date formats. For instance:
+
+- 3:00 (today at 3am)
+- 15:00 (today at 3pm)
+- 2010/9/1 2pm (September 1st 2010 at 2pm)
+
+For more examples of valid time/date statements, please refer to the
+`python-dateutil` documentation:
+
+- https://dateutil.readthedocs.org/[dateutil.readthedocs.org]
+
+If the dateutil module is not available, then the time is parsed using
+strptime and only the format as printed by `date(1)` is allowed:
+
+- Tue Sep 15 20:46:27 CEST 2010
+
+Usage:
+...............
+limit [<from_time>] [<to_time>]
+...............
+Examples:
+...............
+limit 10:15
+limit 15h22m 16h
+limit "Sun 5 20:46" "Sun 5 22:00"
+...............
+
+[[cmdhelp_history_log,log content]]
+==== `log`
+
+Show messages logged on one or more nodes. Leaving out a node
+name produces combined logs of all nodes. Messages are sorted by
+time and, if the terminal emulation supports it, displayed in
+different colours depending on the node to allow for easier
+reading.
+
+The sorting key is the timestamp as written by syslog which
+normally has the maximum resolution of one second. Obviously,
+messages generated by events which share the same timestamp may
+not be sorted in the same way as they happened. Such close events
+may actually happen fairly often.
+
+Usage:
+...............
+log [<node> [<node> ...] ]
+...............
+Example:
+...............
+log node-a
+...............
+
+[[cmdhelp_history_node,node events]]
+==== `node`
+
+Show important events that happened on a node. Important events
+are node lost and join, standby and online, and fence. Use either
+node names or extended regular expressions.
+
+Usage:
+...............
+node <node> [<node> ...]
+...............
+Example:
+...............
+node node1
+...............
+
+[[cmdhelp_history_peinputs,list or get PE input files]]
+==== `peinputs`
+
+Every event in the cluster results in generating one or more
+Policy Engine (PE) files. These files describe future motions of
+resources. The files are listed as full paths in the current
+report directory. Add +v+ to also see the creation time stamps.
+
+Usage:
+...............
+peinputs [{<range>|<number>} ...] [v]
+
+range :: <n1>:<n2>
+...............
+Example:
+...............
+peinputs
+peinputs 440:444 446
+peinputs v
+...............
+
+[[cmdhelp_history_refresh,refresh live report]]
+==== `refresh`
+
+This command makes sense only for the +live+ source and makes
+`crm` collect the latest logs and other relevant information from
+the cluster nodes. If you want to make a completely new report, specify
++force+.
+
+Usage:
+...............
+refresh [force]
+...............
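+
+For example, to discard the current report and collect a
+completely new one:
+...............
+refresh force
+...............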
+
+[[cmdhelp_history_resource,resource events]]
+==== `resource`
+
+Show actions and any failures that happened on all specified
+resources on all nodes. Normally, one gives resource names as
+arguments, but it is also possible to use extended regular
+expressions. Note that group, clone, and master/slave names are
+never logged themselves. The resource command is going to expand
+all of these appropriately, so that clone instances or resources
+which are part of a group are shown.
+
+Usage:
+...............
+resource <rsc> [<rsc> ...]
+...............
+Example:
+...............
+resource bigdb public_ip
+resource my_.*_db2
+resource ping_clone
+...............
+
+[[cmdhelp_history_session,manage history sessions]]
+==== `session`
+
+Sometimes you may want to get back to examining a particular
+history period or bug report. In order to make that easier, the
+current settings can be saved and later retrieved.
+
+If the current history being examined is coming from a live
+cluster, the logs, PE inputs, and other files are saved too,
+because they may disappear from the nodes. For existing reports
+coming from `hb_report`, only the directory location is saved
+(so as not to waste space).
+
+A history session may also be packed into a tarball which can
+then be sent to support.
+
+Leave out the subcommand to see the current session.
+
+Usage:
+...............
+session [{save|load|delete} <name> | pack [<name>] | update | list]
+...............
+Examples:
+...............
+session save bnc966622
+session load rsclost-2
+session list
+...............
+
+[[cmdhelp_history_setnodes,set the list of cluster nodes]]
+==== `setnodes`
+
+If the host this program runs on is not part of the cluster, it
+is necessary to set the list of nodes manually.
+
+Usage:
+...............
+setnodes <node> [<node> ...]
+...............
+Example:
+...............
+setnodes node_a node_b
+...............
+
+[[cmdhelp_history_show,show status or configuration of the PE input file]]
+==== `show`
+
+Every transition is saved as a PE file. Use this command to
+render that PE file either as configuration or status. The
+configuration output is the same as `crm configure show`.
+
+Usage:
+...............
+show <pe> [status]
+
+pe :: <number>|<index>|<file>|live
+...............
+Examples:
+...............
+show 2066
+show pe-input-2080.bz2 status
+...............
+
+[[cmdhelp_history_source,set source to be examined]]
+==== `source`
+
+Events to be examined can come from the current cluster or from a
+`hb_report` report. This command sets the source. `source live`
+sets source to the running cluster and system logs. If no source
+is specified, the current source information is printed.
+
+If a report source is specified as a file, the file is unpacked
+in the directory where it resides. This directory is not removed
+on exit.
+
+Usage:
+...............
+source [<dir>|<file>|live]
+...............
+Examples:
+...............
+source live
+source /tmp/customer_case_22.tar.bz2
+source /tmp/customer_case_22
+source
+...............
+
+[[cmdhelp_history_transition,show transition]]
+==== `transition`
+
+This command will print the actions planned by the PE and run
+graphviz (`dotty`) to display a graphical representation of the
+transition. Of course, for the latter an X11 session is required.
+This command invokes `ptest(8)` in the background.
+
+The +showdot+ subcommand runs graphviz (`dotty`) to display a
+graphical representation of the +.dot+ file which has been
+included in the report. Essentially, it shows the calculation
+produced by the `pengine` installed on the node where the report
+was produced. In the optimal case, this output should not differ
+from the one produced by the locally installed `pengine`.
+
+The `log` subcommand shows the full log for the duration of the
+transition.
+
+A transition can also be saved to a CIB shadow for further
+analysis or use with `cib` or `configure` commands (use the
+`save` subcommand). The shadow file name defaults to the name of
+the PE input file.
+
+If the PE input file number is not provided, it defaults to the
+last one, i.e. the last transition. The last transition can also
+be referenced with number 0. If the number is negative, then the
+corresponding transition relative to the last one is chosen.
+
+If there are warning and error PE input files, or if different
+nodes were the DC in the observed timeframe, PE input file
+numbers may collide. In that case, provide a unique part of the
+path to the file.
+
+After the `ptest` output, logs about events that happened during
+the transition are printed.
+
+The `tags` subcommand scans the logs for the transition and
+returns a list of key events during that transition. For example,
+the tag +error+ will be returned if any errors were logged during
+the transition.
+
+Usage:
+...............
+transition [<number>|<index>|<file>] [nograph] [v...] [scores] [actions] [utilization]
+transition showdot [<number>|<index>|<file>]
+transition log [<number>|<index>|<file>]
+transition save [<number>|<index>|<file> [name]]
+transition tags [<number>|<index>|<file>]
+...............
+Examples:
+...............
+transition
+transition 444
+transition -1
+transition pe-error-3.bz2
+transition node-a/pengine/pe-input-2.bz2
+transition showdot 444
+transition log
+transition save 0 enigma-22
+...............
+
+[[cmdhelp_history_transitions,list transitions]]
+==== `transitions`
+
+A transition represents a change in cluster configuration or
+state. This command lists the transitions in the current timeframe.
+
+Usage:
+...............
+transitions
+...............
+Example:
+...............
+transitions
+...............
+
+
+[[cmdhelp_history_wdiff,cluster states/transitions difference]]
+==== `wdiff`
+
+A transition represents a change in cluster configuration or
+state. Use `wdiff` to see what has changed between two
+transitions as word differences on a line-by-line basis.
+
+If you want to specify the current cluster configuration and
+status, use the string +live+.
+
+Normally, the first transition specified should be the older
+one, but this is not enforced.
+
+Note that a single configuration update may result in more than
+one transition.
+
+Usage:
+...............
+wdiff <pe> <pe> [status]
+
+pe :: <number>|<index>|<file>|live
+...............
+Examples:
+...............
+wdiff 2066 2067
+wdiff pe-input-2080.bz2 live status
+...............
+
+[[cmdhelp_root_report,Create cluster status report]]
+=== `report`
+
+Interface to a tool for creating a cluster report. A report is an
+archive containing log files, configuration files, system information
+and other relevant data for a given time period. This is a useful tool
+for collecting data to attach to bug reports, or for detecting the
+root cause of errors resulting in resource failover, for example.
+
+See `crmsh_hb_report(8)` for more details on arguments, or call
+`crm report -h`.
+
+Usage:
+...............
+report -f {time|"cts:"testnum} [-t time] [-u user] [-l file]
+ [-n nodes] [-E files] [-p patt] [-L patt] [-e prog]
+ [-MSDZAVsvhd] [dest]
+...............
+
+Examples:
+...............
+report -f 2pm report_1
+report -f "2007/9/5 12:30" -t "2007/9/5 14:00" report_2
+report -f 1:00 -t 3:00 -l /var/log/cluster/ha-debug report_3
+report -f "09sep07 2:00" -u hbadmin report_4
+report -f 18:00 -p "usern.*" -p "admin.*" report_5
+report -f cts:133 ctstest_133
+...............
+
+=== `end` (`cd`, `up`)
+
+The `end` command ends the current level and the user moves to
+the parent level. This command is available everywhere.
+
+Usage:
+...............
+end
+...............
+
+=== `help`
+
+The `help` command prints help for the current level or for the
+specified topic (command). This command is available everywhere.
+
+Usage:
+...............
+help [<topic>]
+...............
+
+=== `quit` (`exit`, `bye`)
+
+Leave the program.
+
+BUGS
+----
+Even though all sensible configurations (and most of those that
+are not) are supported by the crm shell, I suspect that certain
+XML constructs may still confuse the tool. When that happens,
+please file a bug report.
+
+The crm shell will not try to update the objects it does not
+understand. Of course, it is always possible to edit such objects
+in the XML format.
+
+AUTHORS
+-------
+Dejan Muhamedagic, <dejan@suse.de>
+Kristoffer Gronlund <kgronlund@suse.com>
+and many OTHERS
+
+SEE ALSO
+--------
+crm_resource(8), crm_attribute(8), crm_mon(8), cib_shadow(8),
+ptest(8), dotty(1), crm_simulate(8), cibadmin(8)
+
+
+COPYING
+-------
+Copyright \(C) 2008-2013 Dejan Muhamedagic.
+Copyright \(C) 2013 Kristoffer Gronlund.
+
+Free use of this software is granted under the terms of the GNU General Public License (GPL).
+
+//////////////////////
+ vim:ts=4:sw=4:expandtab:
+//////////////////////
diff --git a/doc/website-v1/man-3.adoc b/doc/website-v1/man-3.adoc
new file mode 100644
index 0000000..e4411cc
--- /dev/null
+++ b/doc/website-v1/man-3.adoc
@@ -0,0 +1,5309 @@
+:man source: crm
+:man version: 2.3.0
+:man manual: crmsh documentation
+
+crm(8)
+======
+
+NAME
+----
+crm - Pacemaker command line interface for configuration and management
+
+
+SYNOPSIS
+--------
+*crm* [OPTIONS] [SUBCOMMAND ARGS...]
+
+
+[[topics_Description,Program description]]
+DESCRIPTION
+-----------
+The `crm` shell is a command-line based cluster configuration and
+management tool. Its goal is to assist as much as possible with the
+configuration and maintenance of Pacemaker-based High Availability
+clusters.
+
+For more information on Pacemaker itself, see http://clusterlabs.org/.
+
+`crm` works both as a command-line tool to be called directly from the
+system shell, and as an interactive shell with extensive tab
+completion and help.
+
+The primary focus of the `crm` shell is to provide a simplified and
+consistent interface to Pacemaker, but it also provides tools for
+managing the creation and configuration of High Availability clusters
+from scratch. To learn more about this aspect of `crm`, see the
+`cluster` section below.
+
+The `crm` shell can be used to manage every aspect of configuring and
+maintaining a cluster. It provides a simplified line-based syntax on
+top of the XML configuration format used by Pacemaker, commands for
+starting and stopping resources, tools for exploring the history of a
+cluster including log scraping and a set of cluster scripts useful for
+automating the setup and installation of services on the cluster
+nodes.
+
+The `crm` shell is line-oriented: every command must start and finish
+on the same line. It is possible to use a continuation character (+\+)
+to write one command across two or more lines. The continuation
+character is commonly used when displaying configurations.
+
+[[topics_CommandLine,Command line options]]
+OPTIONS
+-------
+*-f, --file*='FILE'::
+ Load commands from the given file. If a dash +-+ is used in place
+ of a file name, `crm` will read commands from the shell standard
+ input (`stdin`).
+
+*-c, --cib*='CIB'::
+ Start the session using the given shadow CIB file.
+ Equivalent to +cib use <CIB>+.
+
+*-D, --display*='OUTPUT_TYPE'::
+ Choose one of the output options: +plain+, +color-always+, +color+,
+ or +uppercase+. The default is +color+ if the terminal emulation
+ supports colors. Otherwise, +plain+ is used.
+
+*-F, --force*::
+ Make `crm` proceed with applying changes where it would normally
+ ask the user to confirm before proceeding. This option is mainly
+ useful in scripts, and should be used with care.
+
+*-w, --wait*::
+ Make `crm` wait for the cluster transition to finish (for the
+ changes to take effect) after each processed line.
+
+*-H, --history*='DIR|FILE|SESSION'::
+ A directory or file containing a cluster report to load
+ into the `history` commands, or the name of a previously
+ saved history session.
+
+*-h, --help*::
+ Print help page.
+
+*--version*::
+	Print crmsh version and build information (Mercurial changeset
+	hash).
+
+*-d, --debug*::
+ Print verbose debugging information.
+
+*-R, --regression-tests*::
+ Enables extra verbose trace logging used by the regression
+ tests. Logs all external calls made by crmsh.
+
+*--scriptdir*='DIR'::
+ Extra directory where crm looks for cluster scripts, or a list of
+ directories separated by semi-colons (e.g. +/dir1;/dir2;etc.+).
+
+*-o, --opt*='OPTION=VALUE'::
+ Set crmsh option temporarily. If the options are saved using
+ +options save+ then the value passed here will also be saved.
+ Multiple options can be set by using +-o+ multiple times.
+
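+A few illustrative invocations combining these options (the file
+name is hypothetical; +www_app+ and the +test-2+ shadow CIB follow
+examples used elsewhere in this document):
+...............
+# crm -f /tmp/cluster-setup.txt
+# crm -w resource restart www_app
+# crm -c test-2 configure show
+...............
+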
+[[topics_Introduction,Introduction]]
+== Introduction
+
+This section of the user guide covers general topics about the user
+interface and describes some of the features of `crmsh` in detail.
+
+[[topics_Introduction_Interface,User interface]]
+=== User interface
+
+The main purpose of `crmsh` is to provide a simple yet powerful
+interface to the cluster stack. There are two main modes of operation
+with the user interface of `crmsh`:
+
+* Command line (single-shot) use - Use `crm` as a regular UNIX command
+ from your usual shell. `crm` has full bash completion built in, so
+ using it in this manner should be as comfortable and familiar as
+ using any other command-line tool.
+
+* Interactive mode - By calling `crm` without arguments, or by calling
+ it with only a sublevel as argument, `crm` enters the interactive
+ mode. In this mode, it acts as its own command shell, which
+ remembers which sublevel you are currently in and allows for rapid
+ and convenient execution of multiple commands within the same
+ sublevel. This mode also has full tab completion, as well as
+ built-in interactive help and syntax highlighting.
+
+Here are a few examples of using `crm` both as a command-line tool and
+as an interactive shell:
+
+.Command line (single-shot) use:
+........
+# crm resource stop www_app
+........
+
+.Interactive use:
+........
+# crm
+crm(live)# resource
+crm(live)resource# unmanage tetris_1
+crm(live)resource# up
+crm(live)# node standby node4
+........
+
+.Cluster configuration:
+........
+# crm configure <<EOF
+ #
+ # resources
+ #
+ primitive disk0 iscsi \
+ params portal=192.168.2.108:3260 target=iqn.2008-07.com.suse:disk0
+ primitive fs0 Filesystem \
+ params device=/dev/disk/by-label/disk0 directory=/disk0 fstype=ext3
+ primitive internal_ip IPaddr params ip=192.168.1.101
+ primitive apache apache \
+ params configfile=/disk0/etc/apache2/site0.conf
+ primitive apcfence stonith:apcsmart \
+ params ttydev=/dev/ttyS0 hostlist="node1 node2" \
+ op start timeout=60s
+ primitive pingd pingd \
+ params name=pingd dampen=5s multiplier=100 host_list="r1 r2"
+ #
+ # monitor apache and the UPS
+ #
+ monitor apache 60s:30s
+ monitor apcfence 120m:60s
+ #
+ # cluster layout
+ #
+ group internal_www \
+ disk0 fs0 internal_ip apache
+ clone fence apcfence \
+ meta globally-unique=false clone-max=2 clone-node-max=1
+ clone conn pingd \
+ meta globally-unique=false clone-max=2 clone-node-max=1
+ location node_pref internal_www \
+ rule 50: #uname eq node1 \
+ rule pingd: defined pingd
+ #
+ # cluster properties
+ #
+ property stonith-enabled=true
+ commit
+EOF
+........
+
+The `crm` interface is hierarchical, with commands organized into
+separate levels by functionality. To list the available levels and
+commands, either execute +help <level>+ or, at the top level of
+the shell, simply type `help` for an overview of all available
+levels and commands.
+
+The +(live)+ string in the `crm` prompt signifies that the current CIB
+in use is the live cluster configuration. It is also possible to
+work with so-called <<topics_Features_Shadows,shadow CIBs>>: separate,
+inactive configurations stored in files, which can be applied at any
+time, thereby replacing the live configuration.
+
+[[topics_Introduction_Completion,Tab completion]]
+=== Tab completion
+
+The `crm` shell makes extensive use of tab completion. The completion
+is both static (i.e. for `crm` commands) and dynamic. The latter
+takes into account the current status of the cluster or
+information from installed resource agents. Sometimes, completion
+may also be used to get short help on resource parameters. Here
+are a few examples:
+
+...............
+crm(live)resource# <TAB><TAB>
+bye failcount move restart unmigrate
+cd help param show unmove
+cleanup list promote start up
+demote manage quit status utilization
+end meta refresh stop
+exit migrate reprobe unmanage
+
+crm(live)configure# primitive fence-1 <TAB><TAB>
+heartbeat: lsb: ocf: stonith:
+
+crm(live)configure# primitive fence-1 stonith:<TAB><TAB>
+apcmaster external/ippower9258 fence_legacy
+apcmastersnmp external/kdumpcheck ibmhmc
+apcsmart external/libvirt ipmilan
+
+crm(live)configure# primitive fence-1 stonith:ipmilan params <TAB><TAB>
+auth= hostname= ipaddr= login= password= port= priv=
+
+crm(live)configure# primitive fence-1 stonith:ipmilan params auth=<TAB><TAB>
+auth* (string)
+ The authorization type of the IPMI session ("none", "straight", "md2", or "md5")
+...............
+
+`crmsh` also comes with bash completion usable directly from the
+system shell. This should be installed automatically with the command
+itself.
+
+[[topics_Introduction_Shorthand,Shorthand syntax]]
+=== Shorthand syntax
+
+When using the `crm` shell to manage clusters, you will end up typing
+a lot of commands many times over. Clear command names like
++configure+ help in understanding and learning to use the cluster
+shell, but are easy to misspell and tedious to type repeatedly. The
+interactive mode and tab completion both help with this, but the `crm`
+shell also has the ability to understand a variety of shorthand
+aliases for all of the commands.
+
+For example, instead of typing `crm status`, you can type `crm st` or
+`crm stat`. Instead of `crm configure` you can type `crm cfg` or even
+`crm cf`. `crm resource` can be shortened to `crm rsc`, and so on.
+
+The exact list of accepted aliases is too long to print in full, but
+experimentation and typos should help in discovering more of them.
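+
+For instance, the following pairs of invocations are equivalent,
+using the aliases mentioned above:
+...............
+crm status
+crm st
+
+crm configure show
+crm cfg show
+...............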
+
+[[topics_Features,Features]]
+== Features
+
+The feature set of crmsh covers a wide range of functionality, and
+understanding how and when to use the various features of the shell
+can be difficult. This section of the guide describes some of the
+features and use cases of `crmsh` in more depth. The intention is to
+provide a deeper understanding of these features, but also to serve as
+a guide to using them.
+
+[[topics_Features_Shadows,Shadow CIB usage]]
+=== Shadow CIB usage
+
+A shadow CIB is a normal cluster configuration stored in a file.
+Shadow CIBs may be manipulated in much the same way as the _live_ CIB,
+with the key difference that changes to a shadow CIB have no effect on
+the actual cluster resources. An administrator may choose to apply any
+of them to the cluster, thus replacing the running configuration with
+the one found in the shadow CIB.
+
+The `crm` prompt always contains the name of the configuration which
+is currently in use, or the string _live_ if using the live cluster
+configuration.
+
+When editing the configuration in the `configure` level, no changes
+are actually applied until the `commit` command is executed. It is
+possible to start editing a configuration as usual, but instead of
+committing the changes to the active CIB, save them to a shadow CIB.
+
+The following example `configure` session demonstrates how this can be
+done:
+...............
+crm(live)configure# cib new test-2
+INFO: test-2 shadow CIB created
+crm(test-2)configure# commit
+...............
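+
+At this point the changes live only in the +test-2+ shadow CIB. A
+sketch of how such a session might continue, using the `cib`
+commands described later in this document:
+...............
+crm(test-2)configure# cib use live
+crm(live)configure# cib commit test-2
+...............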
+
+[[topics_Features_Checks,Configuration semantic checks]]
+=== Configuration semantic checks
+
+Resource definitions may be checked against the meta-data
+provided with the resource agents. These checks are currently
+carried out:
+
+- are required parameters set
+- existence of defined parameters
+- timeout values for operations
+
+The parameter checks are obvious and need no further explanation.
+Failures in these checks are treated as configuration errors.
+
+The timeouts for operations should be at least as long as those
+recommended in the meta-data. Too short timeout values are a
+common mistake in cluster configurations and, even worse, they
+often slip through if cluster testing was not thorough. Though
+operation timeout issues are treated as warnings, make sure that
+the timeouts are usable in your environment. Note also that the
+values given are just an _advisory minimum_; your resources may
+require longer timeouts.
+
+Users may tune the frequency of checks and the treatment of errors
+by the <<cmdhelp_options_check-frequency,`check-frequency`>> and
+<<cmdhelp_options_check-mode,`check-mode`>> preferences.
+
+Note that if the +check-frequency+ is set to +always+ and the
++check-mode+ to +strict+, errors are not tolerated and such
+configuration cannot be saved.
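+
+For example, the strict behaviour described above could be enabled
+through the options level (a sketch; see the referenced
+preferences for details):
+...............
+crm(live)# options
+crm(live)options# check-frequency always
+crm(live)options# check-mode strict
+...............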
+
+[[topics_Features_Templates,Configuration templates]]
+=== Configuration templates
+
+.Deprecation note
+****************************
+Configuration templates have been deprecated in favor of the more
+capable `cluster scripts`. To learn how to use cluster scripts, see
+the dedicated documentation on the `crmsh` website at
+http://crmsh.github.io/, or in the <<cmdhelp_script,Script section>>.
+****************************
+
+Configuration templates are ready-made configurations created by
+cluster experts. They are designed in such a way that users
+may generate valid cluster configurations with minimum effort.
+If you are new to Pacemaker, templates may be the best way to
+start.
+
+We will show here how to create a simple yet functional Apache
+configuration:
+...............
+# crm configure
+crm(live)configure# template
+crm(live)configure template# list templates
+apache filesystem virtual-ip
+crm(live)configure template# new web <TAB><TAB>
+apache filesystem virtual-ip
+crm(live)configure template# new web apache
+INFO: pulling in template apache
+INFO: pulling in template virtual-ip
+crm(live)configure template# list
+web2-d web2 vip2 web3 vip web
+...............
+
+We enter the `template` level from `configure`. Use the `list`
+command to show templates available on the system. The `new`
+command creates a configuration from the +apache+ template. You
+can use tab completion to pick templates. Note that the apache
+template depends on a virtual IP address which is automatically
+pulled along. The `list` command shows the just created +web+
+configuration, among other configurations (I hope that you,
+unlike me, will use more sensible and descriptive names).
+
+The `show` command, which displays the resulting configuration,
+may be used to get an idea about the minimum required changes
+which have to be done. All +ERROR+ messages show the line numbers
+at which the respective parameters are to be defined:
+...............
+crm(live)configure template# show
+ERROR: 23: required parameter ip not set
+ERROR: 61: required parameter id not set
+ERROR: 65: required parameter configfile not set
+crm(live)configure template# edit
+...............
+
+The `edit` command invokes the preferred text editor with the
++web+ configuration. At the top of the file, the user is advised
+how to make changes. A good template should only require the user
+to specify parameter values. For example, the +web+ configuration
+we created above has the following required and optional
+parameters (all parameter lines start with +%%+):
+...............
+$ grep -n ^%% ~/.crmconf/web
+23:%% ip
+31:%% netmask
+35:%% lvs_support
+61:%% id
+65:%% configfile
+71:%% options
+76:%% envfiles
+...............
+
+These lines are the only ones that should be modified. Simply
+append the parameter value at the end of the line. For instance,
+after editing this template, the result could look like this (we
+used tabs instead of spaces to make the values stand out):
+...............
+$ grep -n ^%% ~/.crmconf/web
+23:%% ip 192.168.1.101
+31:%% netmask
+35:%% lvs_support
+61:%% id websvc
+65:%% configfile /etc/apache2/httpd.conf
+71:%% options
+76:%% envfiles
+...............
+
+As you can see, the parameter line format is very simple:
+...............
+%% <name> <value>
+...............
+
+After editing the file, use `show` again to display the
+configuration:
+...............
+crm(live)configure template# show
+primitive virtual-ip IPaddr \
+ params ip=192.168.1.101
+primitive apache apache \
+ params configfile="/etc/apache2/httpd.conf"
+monitor apache 120s:60s
+group websvc \
+ apache virtual-ip
+...............
+
+The target resource of the apache template is a group which we
+named +websvc+ in this sample session.
+
+This configuration looks exactly as you could type it at the
+`configure` level. The point of templates is to save you some
+typing. It is important, however, to understand the configuration
+produced.
+
+Finally, the configuration may be applied to the current
+crm configuration (note how the configuration changed slightly,
+though it is still equivalent, after being digested at the
+`configure` level):
+...............
+crm(live)configure template# apply
+crm(live)configure template# cd ..
+crm(live)configure# show
+node xen-b
+node xen-c
+primitive apache apache \
+ params configfile="/etc/apache2/httpd.conf" \
+ op monitor interval=120s timeout=60s
+primitive virtual-ip IPaddr \
+ params ip=192.168.1.101
+group websvc apache virtual-ip
+...............
+
+Note that this still does not commit the configuration to the CIB
+which is used in the shell, either the running one (+live+) or
+some shadow CIB. For that you still need to execute the `commit`
+command.
+
+To complete our example, we should also define the preferred node
+to run the service:
+
+...............
+crm(live)configure# location websvc-pref websvc 100: xen-b
+...............
+
+If you are not happy with some resource names which are provided
+by default, you can rename them now:
+
+...............
+crm(live)configure# rename virtual-ip intranet-ip
+crm(live)configure# show
+node xen-b
+node xen-c
+primitive apache apache \
+ params configfile="/etc/apache2/httpd.conf" \
+ op monitor interval=120s timeout=60s
+primitive intranet-ip IPaddr \
+ params ip=192.168.1.101
+group websvc apache intranet-ip
+location websvc-pref websvc 100: xen-b
+...............
+
+To summarize, working with templates typically consists of the
+following steps:
+
+- `new`: create a new configuration from templates
+- `edit`: define parameters, at least the required ones
+- `show`: see if the configuration is valid
+- `apply`: apply the configuration to the `configure` level
+
+[[topics_Features_Testing,Resource testing]]
+=== Resource testing
+
+The amount of detail in a cluster makes all configurations prone
+to errors. By far the largest number of issues in a cluster is
+due to bad resource configuration. The shell can help quickly
+diagnose such problems, and considerably reduce your keyboard
+wear.
+
+Let's say that we entered the following configuration:
+...............
+node xen-b
+node xen-c
+node xen-d
+primitive fencer stonith:external/libvirt \
+ params hypervisor_uri="qemu+tcp://10.2.13.1/system" \
+ hostlist="xen-b xen-c xen-d" \
+ op monitor interval=2h
+primitive svc Xinetd \
+ params service=systat \
+ op monitor interval=30s
+primitive intranet-ip IPaddr2 \
+ params ip=10.2.13.100 \
+ op monitor interval=30s
+primitive apache apache \
+ params configfile="/etc/apache2/httpd.conf" \
+ op monitor interval=120s timeout=60s
+group websvc apache intranet-ip
+location websvc-pref websvc 100: xen-b
+...............
+
+Before typing `commit` to submit the configuration to the CIB, we
+can make sure that all resources are usable on all nodes:
+...............
+crm(live)configure# rsctest websvc svc fencer
+...............
+
+It is important that the resources being tested are not running
+on any nodes; otherwise, the `rsctest` command will refuse to do
+anything. Of course, if the current configuration resides in a
+CIB shadow, then a `commit` is irrelevant. The point is that the
+resources must not be running on any node.
+
+.Note on stopping all resources
+****************************
+Alternatively to not committing a configuration, it is also
+possible to tell Pacemaker not to start any resources:
+
+...............
+crm(live)configure# property stop-all-resources=yes
+...............
+Almost none: resources of class stonith are still started, but
+the shell is not as strict when it comes to stonith resources.
+****************************
+
+The order of resources is significant insofar as a resource depends
+on all resources to its left. In most configurations, it's
+probably practical to test resources in several runs, based on
+their dependencies.
+
+Apart from groups, `crm` does not interpret constraints and
+therefore knows nothing about resource dependencies. It also
+doesn't know if a resource can run on a node at all in case of an
+asymmetric cluster. It is up to the user to specify a list of
+eligible nodes if a resource is not meant to run on every node.
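+
+For instance, to limit testing of the resources above to two
+eligible nodes, the node names may be appended (a sketch based on
+the configuration shown earlier):
+...............
+crm(live)configure# rsctest websvc svc fencer xen-b xen-c
+...............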
+
+[[topics_Features_Security,Access Control Lists (ACL)]]
+=== Access Control Lists (ACL)
+
+.Note on ACLs in Pacemaker 1.1.12
+****************************
+The support for ACLs has been revised in Pacemaker version 1.1.12 and
+up. Depending on which version you are using, the information in this
+section may no longer be accurate. Look for the `acl_target`
+configuration element for more details on the new syntax.
+****************************
+
+By default, the users from the +haclient+ group have full access
+to the cluster (or, more precisely, to the CIB). Access control
+lists allow for finer access control to the cluster.
+
+Access control lists consist of an ordered set of access rules.
+Each rule allows read or write access or denies access
+completely. Rules are typically combined to produce a specific
+role. Then, users may be assigned a role.
+
+For instance, this is a role which defines a set of rules
+allowing management of a single resource:
+
+...............
+role bigdb_admin \
+ write meta:bigdb:target-role \
+ write meta:bigdb:is-managed \
+ write location:bigdb \
+ read ref:bigdb
+...............
+
+The first two rules allow modifying the +target-role+ and
++is-managed+ meta attributes, which effectively enables users in
+this role to stop/start and manage/unmanage the resource. The
+write access rule for location constraints allows moving the
+resource around.
+Finally, the user is granted read access to the resource
+definition.
+
+For proper operation of all Pacemaker programs, it is advisable
+to add the following role to all users:
+
+...............
+role read_all \
+ read cib
+...............
+
+For finer-grained read access, try the rules listed in the
+following role:
+
+...............
+role basic_read \
+ read node attribute:uname \
+ read node attribute:type \
+ read property \
+ read status
+...............
+
+It is however possible that some Pacemaker programs (e.g.
+`ptest`) may not function correctly if the whole CIB is not
+readable.
+
+Some of the ACL rules in the examples above are expanded by the
+shell to XPath specifications. For instance,
++meta:bigdb:target-role+ expands to:
+
+........
+//primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']
+........
+
+You can see the expansion by showing XML:
+
+...............
+crm(live) configure# show xml bigdb_admin
+...
+<acls>
+ <acl_role id="bigdb_admin">
+ <write id="bigdb_admin-write"
+ xpath="//primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']"/>
+...............
+
+Many different XPath expressions can have equal meaning. For
+instance, the following two are equivalent, but only the first one
+is recognized as a shortcut:
+
+...............
+//primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']
+//resources/primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']
+...............
+
+XPath is a powerful language, but you should try to keep your ACL
+XPath expressions simple and use the built-in shortcuts whenever
+possible.
+
+[[topics_Features_Resourcesets,Syntax: Resource sets]]
+=== Syntax: Resource sets
+
+Using resource sets can be a bit confusing unless one knows the
+details of the implementation in Pacemaker as well as how to interpret
+the syntax provided by `crmsh`.
+
+Three different types of resource sets are provided by `crmsh`, and
+each one implies different values for the two resource set attributes,
++sequential+ and +require-all+.
+
++sequential+::
+ If false, the resources in the set do not depend on each other
+ internally. Setting +sequential+ to +true+ implies a strict order of
+ dependency within the set.
+
++require-all+::
+ If false, only one resource in the set is required to fulfil the
+ requirements of the set. The set of A, B and C with +require-all+
+    set to +false+ is read as "A OR B OR C" when its dependencies
+ are resolved.
+
+The three types of resource sets modify the attributes in the
+following way:
+
+1. Implicit sets (no brackets). +sequential=true+, +require-all=true+
+2. Parenthesis set (+(+ ... +)+). +sequential=false+, +require-all=true+
+3. Bracket set (+[+ ... +]+). +sequential=false+, +require-all=false+
+
+To create a set with the properties +sequential=true+ and
++require-all=false+, explicitly set +sequential+ in a bracketed set,
++[ A B C sequential=true ]+.
+
+To create multiple sets with both +sequential+ and +require-all+ set to
+true, explicitly set +sequential+ in a parenthesis set:
++A B ( C D sequential=true )+.
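+
+As an illustration, the three set types might appear in order
+constraints like this (a sketch, assuming resources A, B and C
+exist):
+..............
+# implicit set: sequential=true, require-all=true
+order o1 Mandatory: A B C
+# parenthesis set: sequential=false, require-all=true
+order o2 Mandatory: ( A B ) C
+# bracket set: sequential=false, require-all=false
+order o3 Mandatory: [ A B ] C
+..............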
+
+[[topics_Features_AttributeListReferences,Syntax: Attribute list references]]
+=== Syntax: Attribute list references
+
+Attribute lists are used to set attributes and parameters for
+resources, constraints and property definitions. For example, to set
+the virtual IP used by an +IPaddr2+ resource, the attribute +ip+ can be
+set in an attribute list for that resource.
+
+Attribute lists can have identifiers that name them, and other
+resources can reuse the same attribute list by referring to that name
+using an +$id-ref+. For example, the following statement defines a
+simple dummy resource with an attribute list which sets the parameter
++state+ to the value 1 and sets the identifier for the attribute list
+to +on-state+:
+
+..............
+primitive dummy-1 Dummy params $id=on-state state=1
+..............
+
+To refer to this attribute list from a different resource, refer to
+the +on-state+ name using an id-ref:
+
+..............
+primitive dummy-2 Dummy params $id-ref=on-state
+..............
+
+The resource +dummy-2+ will now also have the parameter +state+ set to the value 1.
+
+[[topics_Features_AttributeReferences,Syntax: Attribute references]]
+=== Syntax: Attribute references
+
+In some cases, referencing complete attribute lists is too
+coarse-grained, for example if two different parameters with different
+names should have the same value set. Instead of having to copy the
+value in multiple places, it is possible to create references to
+individual attributes in attribute lists.
+
+To name an attribute in order to be able to refer to it later, prefix
+the attribute name with a +$+ character (as seen above with the
+special names +$id+ and +$id-ref+):
+
+............
+primitive dummy-1 Dummy params $state=1
+............
+
+The identifier +state+ can now be used to refer to this attribute from other
+primitives, using the +@<id>+ syntax:
+
+............
+primitive dummy-2 Dummy params @state
+............
+
+In some cases, using the attribute name as the identifier doesn't work
+due to name clashes. In this case, the syntax +$<id>:<name>=<value>+
+can be used to give the attribute a different identifier:
+
+............
+primitive dummy-1 params $dummy-state-on:state=1
+primitive dummy-2 params @dummy-state-on
+............
+
+There is also the possibility that two resources both use the same
+attribute value but with different names. For example, a web server
+may have a parameter +server_ip+ for setting the IP address where it
+listens for incoming requests, and a virtual IP resource may have a
+parameter called +ip+ which sets the IP address it creates. To
+configure these two resources with an IP without repeating the value,
+the reference can be given a name using the syntax +@<id>:<name>+.
+
+Example:
+............
+primitive virtual-ip IPaddr2 params $vip:ip=192.168.1.100
+primitive webserver apache params @vip:server_ip
+............
+
+[[topics_Syntax_RuleExpressions,Syntax: Rule expressions]]
+=== Syntax: Rule expressions
+
+Many of the configuration commands in `crmsh` now support the use of
+_rule expressions_, which can influence what attributes apply to a
+resource or under which conditions a constraint is applied, depending
+on changing conditions like date, time, the value of attributes and
+more.
+
+Here is an example of a simple rule expression used to apply a
+different resource parameter on the node named `node1`:
+
+..............
+primitive my_resource Special \
+ params 2: rule #uname eq node1 interface=eth1 \
+ params 1: interface=eth0
+..............
+
+This primitive resource has two lists of parameters with descending
+priority. The parameter list with the highest priority is applied
+first, but only if the rule expressions for that parameter list all
+apply. In this case, the rule `#uname eq node1` limits the parameter
+list so that it is only applied on `node1`.
+
+Note that rule expressions are not terminated and are immediately
+followed by the data to which the rule is applied. In this case, the
+name-value pair `interface=eth1`.
+
+Rule expressions can contain multiple expressions connected using the
+boolean operators `or` and `and`. The full syntax for rule expressions
+is listed below.
+
+..............
+rules ::
+ rule [id_spec] [$role=<role>] <score>: <expression>
+ [rule [id_spec] [$role=<role>] <score>: <expression> ...]
+
+id_spec :: $id=<id> | $id-ref=<id>
+score :: <number> | <attribute> | [-]inf
+expression :: <simple_exp> [<bool_op> <simple_exp> ...]
+bool_op :: or | and
+simple_exp :: <attribute> [type:]<binary_op> <value>
+ | <unary_op> <attribute>
+ | date <date_expr>
+type :: <string> | <version> | <number>
+binary_op :: lt | gt | lte | gte | eq | ne
+unary_op :: defined | not_defined
+
+date_expr :: lt <end>
+ | gt <start>
+ | in start=<start> end=<end>
+ | in start=<start> <duration>
+ | spec <date_spec>
+duration|date_spec ::
+ hours=<value>
+ | monthdays=<value>
+ | weekdays=<value>
+    | yeardays=<value>
+ | months=<value>
+ | weeks=<value>
+ | years=<value>
+ | weekyears=<value>
+ | moon=<value>
+..............
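+
+As an illustration of the grammar above, a location constraint
+using a date expression might look like this (a sketch; the
+constraint and resource names are hypothetical):
+..............
+location ban-on-weekends my_resource \
+    rule -inf: date spec weekdays=6-7
+..............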
+
+[[topics_Reference,Command reference]]
+== Command reference
+
+The commands are structured to be compatible with the shell command
+line. Sometimes, the underlying Pacemaker grammar uses characters
+that have special meaning in bash and therefore need to be quoted. This
+includes the hash or pound sign (`#`), single and double quotes, and
+any significant whitespace.
+
+Whitespace is also significant when assigning values, meaning that
++key=value+ is different from +key = value+.
+
+Commands can be referenced using short-hand as long as the short-hand
+is unique. This can be either a prefix of the command name or a prefix
+string of characters found in the name.
+
+For example, +status+ can be abbreviated as +st+ or +su+, and
++configure+ as +conf+ or +cfg+.
+
+The syntax for the commands is given below in an informal, BNF-like
+grammar.
+
+* `<value>` denotes a string.
+* `[value]` means that the construct is optional.
+* The ellipsis (`...`) signifies that the previous construct may be
+ repeated.
+* `first|second` means either first or second.
+* The rest are literals (strings, `:`, `=`).
+
+[[cmdhelp_root_status,Cluster status]]
+=== `status`
+
+Show cluster status. The status is displayed by `crm_mon`. Supply
+additional arguments for more information or different format.
+See `crm_mon(8)` for more details.
+
+Example:
+...............
+status
+status simple
+status full
+...............
+
+Usage:
+...............
+status [<option> ...]
+
+option :: full
+ | bynode
+ | inactive
+ | ops
+ | timing
+ | failcounts
+ | verbose
+ | quiet
+ | html
+ | xml
+ | simple
+ | tickets
+ | noheaders
+ | detail
+ | brief
+...............
+
+[[cmdhelp_root_verify,Verify cluster status]]
+=== `verify`
+
+Performs basic checks for the cluster configuration and
+current status, reporting potential issues.
+
+See `crm_verify(8)` and `crm_simulate(8)` for more details.
+
+Example:
+...............
+verify
+verify scores
+...............
+
+Usage:
+...............
+verify [scores]
+...............
+
+
+[[cmdhelp_cluster,Cluster setup and management]]
+=== `cluster` - Cluster setup and management
+
+Whole-cluster configuration management with High Availability
+awareness.
+
+The commands at the cluster level allow configuration and
+modification of the underlying cluster infrastructure, and also
+supplies tools to do whole-cluster systems management.
+
+These commands enable easy installation and maintenance of an HA
+cluster, by providing support for package installation, configuration
+of the cluster messaging layer, file system setup and more.
+
+[[cmdhelp_cluster_add,Add a new node to the cluster]]
+==== `add`
+
+Add a new node to the cluster. The new node will be
+configured as a cluster member.
+
+Options:
+
+*-y, --yes*::
+ Answer "yes" to all prompts (use with caution)
+
+Usage:
+...............
+add [options] [<node> ...]
+...............
+
+[[cmdhelp_cluster_copy,Copy file to other cluster nodes]]
+==== `copy`
+
+Copy file to other cluster nodes.
+
+Copies the given file to all other nodes unless given a
+list of nodes to copy to as argument.
+
+Usage:
+...............
+copy <filename> [nodes ...]
+...............
+
+Example:
+...............
+copy /etc/motd
+...............
+
+[[cmdhelp_cluster_diff,Diff file across cluster]]
+==== `diff`
+
+Displays the difference, if any, between a given file
+on different nodes. If the second argument is `--checksum`,
+a checksum of the file will be calculated and displayed for
+each node.
+
+Usage:
+...............
+diff <file> [--checksum] [nodes...]
+...............
+
+Example:
+...............
+diff /etc/crm/crm.conf node2
+diff /etc/resolv.conf --checksum
+...............
+
+[[cmdhelp_cluster_geo_init,Configure cluster as geo cluster]]
+==== `geo-init`
+
+Create a new geo cluster with the current cluster as the
+first member. Pass the complete geo cluster topology as
+arguments to this command, and then use `geo-join` and
+`geo-init-arbitrator` to add the remaining members to
+the geo cluster.
+
+Options:
+
+*-q, --quiet*::
+ Be quiet (don't describe what's happening, just do it)
+
+*-y, --yes*::
+ Answer "yes" to all prompts (use with caution)
+
+*--arbitrator=IP*::
+ IP address of geo cluster arbitrator
+
+*--clusters=DESC*::
+ Cluster description (see details below)
+
+*--tickets=LIST*::
+ Tickets to create (space-separated)
+
+
+Cluster Description:
+
+This is a map of cluster names to IP addresses.
+Each IP address will be configured as a virtual IP
+representing that cluster in the geo cluster
+configuration.
+
+Example with two clusters named paris and amsterdam:
+
+............
+ --clusters "paris=192.168.10.10 amsterdam=192.168.10.11"
+............
+
+Name clusters using the +--name+ parameter to `init`.
+
+Usage:
+...............
+geo-init [options]
+...............
+
+
+[[cmdhelp_cluster_geo_init_arbitrator,Initialize node as geo cluster arbitrator]]
+==== `geo-init-arbitrator`
+
+Configure the current node as a geo arbitrator. The command
+requires an existing geo cluster or geo arbitrator from which
+to get the geo cluster configuration.
+
+Options:
+
+*--clusters=DESC*::
+ Cluster description (see +geo-init+ for details)
+
+*-c IP, --cluster-node=IP*::
+ IP address of an already-configured geo cluster
+
+Usage:
+...............
+geo-init-arbitrator [options]
+...............
+
+
+[[cmdhelp_cluster_geo_join,Join cluster to existing geo cluster]]
+==== `geo-join`
+
+This command should be run from one of the nodes in a cluster
+which is currently not a member of a geo cluster. The geo
+cluster configuration will be fetched from the provided node,
+and the cluster will be added to the geo cluster.
+
+Note that each cluster in a geo cluster needs to have a unique
+name set. The cluster name can be set using the `--name` argument
+to `init`, or by configuring corosync with the cluster name in
+an existing cluster.
+
+Options:
+
+*-c IP, --cluster-node=IP*::
+ IP address of an already-configured geo cluster or arbitrator
+
+Usage:
+...............
+geo-join [options]
+...............
+
+
+[[cmdhelp_cluster_health,Cluster health check]]
+==== `health`
+
+Runs a larger set of tests and queries on all nodes in the cluster to
+verify the general system health and detect potential problems.
+
+Usage:
+...............
+health
+...............
+
+[[cmdhelp_cluster_init,Initializes a new HA cluster]]
+==== `init`
+
+Initialize a cluster from scratch. This command configures
+a complete cluster, and can also add additional cluster
+nodes to the initial one-node cluster using the `--nodes`
+option.
+
+Options:
+
+*-q, --quiet*::
+ Be quiet (don't describe what's happening, just do it)
+
+*-y, --yes*::
+ Answer "yes" to all prompts (use with caution, this
+ is destructive, especially during the "storage" stage)
+
+*-t TEMPLATE, --template=TEMPLATE*::
+	Optionally configure the cluster with the named template
+ (currently only "ocfs2" is valid here)
+
+*-n NAME, --name=NAME*::
+ Set the name of the configured cluster.
+
+*-N NODES, --nodes=NODES*::
+ Additional nodes to add to the created cluster. May
+ include the current node, which will always be the
+ initial cluster node.
+
+*-w WATCHDOG, --watchdog=WATCHDOG*::
+ Use the given watchdog device.
+
+Network configuration:
+
+Options for configuring the network and messaging layer.
+
+*-i IF, --interface=IF*::
+ Bind to IP address on interface IF
+
+*-u, --unicast*::
+ Configure corosync to communicate over unicast (UDP),
+ and not multicast. Default is multicast unless an
+ environment where multicast cannot be used is
+ detected.
+
+*-A IP, --admin-ip=IP*::
+ Configure IP address as an administration virtual IP
+
+Storage configuration:
+
+Options for configuring shared storage.
+
+*-p DEVICE, --partition-device=DEVICE*::
+ Partition this shared storage device (only used in
+ "storage" stage)
+
+*-s DEVICE, --sbd-device=DEVICE*::
+ Block device to use for SBD fencing
+
+*-o DEVICE, --ocfs2-device=DEVICE*::
+ Block device to use for OCFS2 (only used in "vgfs"
+ stage)
+
+
+Stage can be one of:
+
+*ssh*::
+ Create SSH keys for passwordless SSH between cluster nodes
+
+*csync2*::
+ Configure csync2
+
+*corosync*::
+ Configure corosync
+
+*storage*::
+ Partition shared storage (ocfs2 template only)
+
+*sbd*::
+ Configure SBD (requires -s <dev>)
+
+*cluster*::
+ Bring the cluster online
+
+*vgfs*::
+ Create volume group and filesystem (ocfs2 template only, requires `-o <dev>`)
+
+*admin*::
+ Create administration virtual IP (optional)
+
+[NOTE]
+============
+- If stage is not specified, the script will run through each stage
+ in sequence, with prompts for required information.
+- If using the ocfs2 template, the storage stage will partition a block
+ device into two pieces, one for SBD, the remainder for OCFS2. This is
+ good for testing and demonstration, but not ideal for production.
+ To use storage you have already configured, pass -s and -o to specify
+ the block devices for SBD and OCFS2, and the automatic partitioning
+ will be skipped.
+============
+
+Usage:
+...............
+init [options] [STAGE]
+...............
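+
+A couple of illustrative invocations (node names and the SBD
+device path are hypothetical):
+...............
+init --name demo --nodes "node1 node2 node3" --yes
+init sbd -s /dev/disk/by-id/scsi-example
+...............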
+
+
+[[cmdhelp_cluster_join,Join existing cluster]]
+==== `join`
+
+Join the current node to an existing cluster. The
+current node cannot be a member of a cluster already.
+Pass any node in the existing cluster as the argument
+to the `-c` option.
+
+Options:
+
+*-q, --quiet*::
+ Be quiet (don't describe what's happening, just do it)
+
+*-y, --yes*::
+ Answer "yes" to all prompts (use with caution)
+
+*-w WATCHDOG, --watchdog=WATCHDOG*::
+ Use the given watchdog device
+
+Network configuration:
+
+Options for configuring the network and messaging layer.
+
+
+*-c HOST, --cluster-node=HOST*::
+ IP address or hostname of existing cluster node
+
+*-i IF, --interface=IF*::
+ Bind to IP address on interface IF
+
+
+Stage can be one of:
+
+*ssh*::
+ Obtain SSH keys from existing cluster node (requires -c <host>)
+
+*csync2*::
+ Configure csync2 (requires -c <host>)
+
+*ssh_merge*::
+ Merge root's SSH known_hosts across all nodes (csync2 must
+ already be configured).
+
+*cluster*::
+ Start the cluster on this node
+
+If stage is not specified, each stage will be invoked in sequence.
+
+Usage:
+...............
+join [options] [STAGE]
+...............
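+
+Example (assuming an existing cluster node named +node1+):
+...............
+join -c node1 --yes
+...............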
+
+
+[[cmdhelp_cluster_remove,Remove node(s) from the cluster]]
+==== `remove`
+
+Remove one or more nodes from the cluster.
+
+This command can remove the last node in the cluster,
+thus effectively removing the whole cluster. To remove
+the last node, pass `--force` argument to `crm` or set
+the `config.core.force` option.
+
+Options:
+
+*-q, --quiet*::
+ Be quiet (don't describe what's happening, just do it)
+
+*-y, --yes*::
+ Answer "yes" to all prompts (use with caution)
+
+*-c HOST, --cluster-node=HOST*::
+ IP address or hostname of cluster node which will be
+ removed from the cluster
+
+Usage:
+...............
+remove [options] [<node> ...]
+...............
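+
+Example (the node name is hypothetical):
+...............
+remove -y node3
+...............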
+
+
+[[cmdhelp_cluster_run,Execute an arbitrary command on all nodes]]
+==== `run`
+
+This command takes a shell statement as argument, executes that
+statement on all nodes in the cluster, and reports the result.
+
+Usage:
+...............
+run <command>
+...............
+
+Example:
+...............
+run "cat /proc/uptime"
+...............
+
+[[cmdhelp_cluster_start,Start cluster services]]
+==== `start`
+
+Starts the cluster-related system services on this node.
+
+Usage:
+.........
+start
+.........
+
+[[cmdhelp_cluster_status,Cluster status check]]
+==== `status`
+
+Reports the status for the cluster messaging layer on the local
+node.
+
+Usage:
+...............
+status
+...............
+
+[[cmdhelp_cluster_stop,Stop cluster services]]
+==== `stop`
+
+Stops the cluster-related system services on this node.
+
+Usage:
+.........
+stop
+.........
+
+[[cmdhelp_cluster_wait_for_startup,Wait for cluster to start]]
+==== `wait_for_startup`
+
+Mostly useful in scripts or automated workflows, this command will
+attempt to connect to the local cluster node repeatedly. The command
+will keep trying until the cluster node responds, or the `timeout`
+elapses. The timeout can be changed by supplying a value in seconds as
+an argument.
+
+Usage:
+........
+wait_for_startup [<timeout>]
+........
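+
+For example, to wait up to two minutes (the value is in seconds,
+as described above):
+........
+wait_for_startup 120
+........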
+
+[[cmdhelp_script,Cluster script management]]
+=== `script` - Cluster script management
+
+A big part of the configuration and management of a cluster is
+collecting information about all cluster nodes and deploying changes
+to those nodes. Often, just performing the same procedure on all nodes
+runs into problems, due to subtle differences in their
+configurations.
+
+For example, when configuring a cluster for the first time, the
+software needs to be installed and configured on all nodes before the
+cluster software can be launched and configured using `crmsh`. This
+process is cumbersome and error-prone, and the goal is for scripts to
+make this process easier.
+
+Scripts are implemented using the Python `parallax` package, which
+provides a thin wrapper on top of SSH. This allows the scripts to
+function through the usual SSH channels used for system maintenance,
+requiring no additional software to be installed or maintained.
+
+[[cmdhelp_script_json,JSON API for cluster scripts]]
+==== `json`
+
+This command provides a JSON API for the cluster scripts, intended for
+use in user interface tools that want to interact with the cluster via
+scripts.
+
+The command takes a single argument, which should be a JSON array with
+the first member identifying the command to perform.
+
+The output is line-based: commands that return multiple results
+return them line by line, ending with the terminator value "end".
+
+When providing parameter values to this command, they should be
+provided as nested objects, so +virtual-ip:ip=192.168.0.5+ on the
+command line becomes the JSON object
++{"virtual-ip":{"ip":"192.168.0.5"}}+.
+
+API:
+........
+["list"]
+=> [{name, shortdesc, category}]
+
+["show", <name>]
+=> [{name, shortdesc, longdesc, category, <<steps>>}]
+
+<<steps>> := [{name, shortdesc, longdesc, required, parameters, steps}]
+
+<<params>> := [{name, shortdesc, longdesc, required, unique, advanced,
+ type, value, example}]
+
+["verify", <name>, <<values>>]
+=> [{shortdesc, longdesc, text, nodes}]
+
+["run", <name>, <<values>>]
+=> [{shortdesc, rc, output|error}]
+........
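+
+For example, listing all scripts and showing one of them via the
+JSON API might look like this when invoked from the system shell
+(the quoting is illustrative):
+........
+# crm script json '["list"]'
+# crm script json '["show", "virtual-ip"]'
+........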
+
+
+[[cmdhelp_script_list,List available scripts]]
+==== `list`
+
+Lists the available scripts, sorted by category. Scripts that have the
+special `Script` category are hidden by default, since they are mainly
+used by other scripts or commands. To also show these, pass `all` as
+argument.
+
+To get a flat list of script names, not sorted by category, pass
+`names` as an extra argument.
+
+Usage:
+............
+list [all] [names]
+............
+
+Example:
+............
+list
+list all names
+............
+
+[[cmdhelp_script_run,Run the script]]
+==== `run`
+
+Given a list of parameter values, this command will execute the
+actions specified by the cluster script. The format for the parameter
+values is the same as for the `verify` command.
+
+The command accepts two common optional parameters:
+
+* `nodes=<nodes>`: List of nodes that the script runs over
+* `dry_run=yes|no`: If set to `yes`, the script will not perform any modifications.
+
+Additional parameters may be available depending on the script.
+
+Use the `show` command to see what parameters are available.
+
+Usage:
+.............
+run <script> [args...]
+.............
+
+Example:
+.............
+run apache install=true
+run sbd id=sbd-1 node=node1 sbd_device=/dev/disk/by-uuid/F00D-CAFE
+.............
+
+[[cmdhelp_script_show,Describe the script]]
+==== `show`
+
+Prints a description and short summary of the script, with
+descriptions of the accepted parameters.
+
+Advanced parameters are hidden by default. To show the complete list
+of parameters accepted by the script, pass `all` as argument.
+
+Usage:
+............
+show <script> [all]
+............
+
+Example:
+............
+show virtual-ip
+............
+
+[[cmdhelp_script_verify,Verify the script]]
+==== `verify`
+
+Checks the given parameter values, and returns a list
+of actions that will be executed when running the script
+if provided the same list of parameter values.
+
+Usage:
+............
+verify <script> [args...]
+............
+
+Example:
+............
+verify sbd id=sbd-1 node=node1 sbd_device=/dev/disk/by-uuid/F00D-CAFE
+............
+
+[[cmdhelp_corosync,Corosync management]]
+=== `corosync` - Corosync management
+
+Corosync is the underlying messaging layer for most HA clusters.
+This level provides commands for editing and managing the corosync
+configuration.
+
+[[cmdhelp_corosync_add-node,Add a corosync node]]
+==== `add-node`
+
+Adds a node to the corosync configuration. This is used with the `udpu`
+type configuration in corosync.
+
+A nodeid for the added node is generated automatically.
+
+Note that this command assumes that only a single ring is used, and
+sets only the address for ring0.
+
+Usage:
+.........
+add-node <addr> [name]
+.........
+
+[[cmdhelp_corosync_del-node,Remove a corosync node]]
+==== `del-node`
+
+Removes a node from the corosync configuration. The argument given is
+the `ring0_addr` address set in the configuration file.
+
+Usage:
+.........
+del-node <addr>
+.........
+
+[[cmdhelp_corosync_diff,Diffs the corosync configuration]]
+==== `diff`
+
+Diffs the corosync configurations on different nodes. If no nodes are
+given as arguments, the corosync configurations on all nodes in the
+cluster are compared.
+
+`diff` takes an optional argument `--checksum`, to display a checksum
+for each file instead of calculating a diff.
+
+Usage:
+.........
+diff [--checksum] [node...]
+.........
+
+[[cmdhelp_corosync_edit,Edit the corosync configuration]]
+==== `edit`
+
+Opens the Corosync configuration file in an editor.
+
+Usage:
+.........
+edit
+.........
+
+[[cmdhelp_corosync_get,Get a corosync configuration value]]
+==== `get`
+
+Returns the value configured in `corosync.conf`, which is not
+necessarily the value used in the running configuration. See `reload`
+for telling corosync about configuration changes.
+
+The argument is the complete dot-separated path to the value.
+
+If there are multiple values configured with the same path, the
+command returns all values for that path. For example, to get all
+configured `ring0_addr` values, use this command:
+
+Example:
+........
+get nodelist.node.ring0_addr
+........
+
+[[cmdhelp_corosync_log,Show the corosync log file]]
+==== `log`
+
+Opens the log file specified in the corosync configuration file. If no
+log file is configured, this command returns an error.
+
+The pager used can be configured either using the PAGER
+environment variable or in `crm.conf`.
+
+Usage:
+.........
+log
+.........
+
+[[cmdhelp_corosync_pull,Pulls the corosync configuration]]
+==== `pull`
+
+Gets the corosync configuration from another node and copies
+it to this node.
+
+Usage:
+.........
+pull <node>
+.........
+
+[[cmdhelp_corosync_push,Push the corosync configuration]]
+==== `push`
+
+Pushes the corosync configuration file on this node to
+the list of nodes provided. If no target nodes are given,
+the configuration is pushed to all other nodes in the cluster.
+
+It is recommended to use `csync2` to distribute the cluster
+configuration files rather than relying on this command.
+
+Usage:
+.........
+push [node] ...
+.........
+
+Example:
+.........
+push node-2 node-3
+.........
+
+[[cmdhelp_corosync_reload,Reload the corosync configuration]]
+==== `reload`
+
+Tells all instances of corosync in this cluster to reload
+`corosync.conf`.
+
+After pushing a new configuration to all cluster nodes, call this
+command to make corosync use the new configuration.
+
+Usage:
+.........
+reload
+.........
+
+[[cmdhelp_corosync_set,Set a corosync configuration value]]
+==== `set`
+
+Sets the value identified by the given path. If the value does not
+exist in the configuration file, it will be added. However, if the
+section containing the value does not exist, the command will fail.
+
+Usage:
+.........
+set <path> <value>
+.........
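+
+Example:
+.........
+set quorum.expected_votes 2
+.........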
+
+[[cmdhelp_corosync_show,Display the corosync configuration]]
+==== `show`
+
+Displays the corosync configuration on the current node.
+
+Usage:
+.........
+show
+.........
+
+[[cmdhelp_corosync_status,Display the corosync status]]
+==== `status`
+
+Displays the status of Corosync, including the votequorum state.
+
+Usage:
+.........
+status
+.........
+
+[[cmdhelp_cib,CIB shadow management]]
+=== `cib` - CIB shadow management
+
+This level is for management of shadow CIBs. It is available both
+at the top level and the `configure` level.
+
+All the commands are implemented using `crm_shadow(8)` and the
+`CIB_shadow` environment variable. The user prompt always
+includes the name of the currently active shadow or the live CIB.
+
+[[cmdhelp_cib_cibstatus,CIB status management and editing]]
+==== `cibstatus`
+
+Enter the level for editing and managing the CIB status section.
+See the <<cmdhelp_cibstatus,CIB status management section>>.
+
+[[cmdhelp_cib_commit,copy a shadow CIB to the cluster]]
+==== `commit`
+
+Apply a shadow CIB to the cluster. If the shadow name is omitted
+then the current shadow CIB is applied.
+
+Temporary shadow CIBs are removed automatically on commit.
+
+Usage:
+...............
+commit [<cib>]
+...............
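+
+Example (assuming a shadow CIB named +test-2+ exists):
+...............
+commit test-2
+...............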
+
+[[cmdhelp_cib_delete,delete a shadow CIB]]
+==== `delete`
+
+Delete an existing shadow CIB.
+
+Usage:
+...............
+delete <cib>
+...............
+
+[[cmdhelp_cib_diff,diff between the shadow CIB and the live CIB]]
+==== `diff`
+
+Print differences between the current cluster configuration and
+the active shadow CIB.
+
+Usage:
+...............
+diff
+...............
+
+[[cmdhelp_cib_import,import a CIB or PE input file to a shadow]]
+==== `import`
+
+At times it may be useful to create a shadow file from the
+existing CIB. The CIB may be specified as a file or as a PE input
+file number. The shell will look up files in the local directory
+first and then in the PE directory (typically `/var/lib/pengine`).
+Once the CIB file is found, it is copied to a shadow and this
+shadow is immediately available for use at both `configure` and
+`cibstatus` levels.
+
+If the shadow name is omitted then the target shadow is named
+after the input CIB file.
+
+Note that there is often more than one PE input file, so you may
+need to specify the full name.
+
+Usage:
+...............
+import {<file>|<number>} [<shadow>]
+...............
+Examples:
+...............
+import pe-warn-2222
+import 2289 issue2
+...............
+
+[[cmdhelp_cib_list,list all shadow CIBs]]
+==== `list`
+
+List existing shadow CIBs.
+
+Usage:
+...............
+list
+...............
+
+[[cmdhelp_cib_new,create a new shadow CIB]]
+==== `new`
+
+Create a new shadow CIB. The live cluster configuration and
+status are copied to the shadow CIB.
+
+If the name of the shadow is omitted, a temporary shadow CIB is
+created. This is useful when multiple level sessions are desired
+without affecting the cluster. A temporary shadow CIB is
+short-lived and will be removed either on `commit` or on program
+exit. Note that if the temporary shadow is not committed, all
+changes in the temporary shadow are lost.
+
+Specify `withstatus` if you want to edit the status section of
+the shadow CIB (see the <<cmdhelp_cibstatus,cibstatus section>>).
+Add `force` to force overwriting the existing shadow CIB.
+
+To start with an empty configuration that is not copied from the live
+CIB, specify the `empty` keyword. (This also allows a shadow CIB to be
+created in case no cluster is running.)
+
+Usage:
+...............
+new [<cib>] [withstatus] [force] [empty]
+...............
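+
+Example (the shadow names here are arbitrary placeholders):
+...............
+new test-2
+new test-2 withstatus force
+new test-3 empty
+...............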
+
+[[cmdhelp_cib_reset,copy live cib to a shadow CIB]]
+==== `reset`
+
+Copy the current cluster configuration into the shadow CIB.
+
+Usage:
+...............
+reset <cib>
+...............
+
+[[cmdhelp_cib_use,change working CIB]]
+==== `use`
+
+Choose a CIB source. If you want to edit the status section of the
+shadow CIB, specify `withstatus` (see <<cmdhelp_cibstatus,`cibstatus`>>).
+Leave out the CIB name to switch to the running CIB.
+
+Usage:
+...............
+use [<cib>] [withstatus]
+...............
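+
+Example (switching to a hypothetical shadow +test-2+, then back to
+the live CIB):
+...............
+use test-2 withstatus
+use
+...............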
+
+[[cmdhelp_ra,Resource Agents (RA) lists and documentation]]
+=== `ra` - Resource Agents (RA) lists and documentation
+
+This level contains commands which show various information about
+the installed resource agents. It is available both at the top
+level and at the `configure` level.
+
+[[cmdhelp_ra_classes,list classes and providers]]
+==== `classes`
+
+Print all resource agents' classes and, where appropriate, a list
+of available providers.
+
+Usage:
+...............
+classes
+...............
+
+[[cmdhelp_ra_info,show meta data for a RA]]
+==== `info` (`meta`)
+
+Show the meta-data of a resource agent type. This is where users
+can find information on how to use a resource agent. It is also
+possible to get information from some programs: `pengine`,
+`crmd`, `cib`, and `stonithd`. Just specify the program name
+instead of an RA.
+
+Usage:
+...............
+info [<class>:[<provider>:]]<type>
+info <type> <class> [<provider>] (obsolete)
+...............
+Example:
+...............
+info apache
+info ocf:pacemaker:Dummy
+info stonith:ipmilan
+info pengine
+...............
+
+[[cmdhelp_ra_list,list RA for a class (and provider)]]
+==== `list`
+
+List available resource agents for the given class. If the class
+is `ocf`, supply a provider to get agents which are available
+only from that provider.
+
+Usage:
+...............
+list <class> [<provider>]
+...............
+Example:
+...............
+list ocf pacemaker
+...............
+
+[[cmdhelp_ra_providers,show providers for a RA and a class]]
+==== `providers`
+
+List providers for a resource agent type. The class parameter
+defaults to `ocf`.
+
+Usage:
+...............
+providers <type> [<class>]
+...............
+Example:
+...............
+providers apache
+...............
+
+[[cmdhelp_ra_validate,validate parameters for RA]]
+==== `validate`
+
+If the resource agent supports the `validate-all` action, this calls
+the action with the given parameters, printing any warnings or errors
+reported by the agent.
+
+Usage:
+................
+validate <agent> [<key>=<value> ...]
+................
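+
+Example (assuming the `ocf:heartbeat:IPaddr2` agent is installed;
+the address is a placeholder):
+................
+validate ocf:heartbeat:IPaddr2 ip=192.168.1.10
+................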
+
+[[cmdhelp_resource,Resource management]]
+=== `resource` - Resource management
+
+At this level resources may be managed.
+
+All (or almost all) commands are implemented with the CRM tools
+such as `crm_resource(8)`.
+
+[[cmdhelp_resource_ban,ban a resource from a node]]
+==== `ban`
+
+Ban a resource from running on a certain node. If no node is given
+as argument, the resource is banned from the current location.
+
+See `move` for details on other arguments.
+
+Usage:
+...............
+ban <rsc> [<node>] [<lifetime>] [force]
+...............
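+
+Example (resource and node names are placeholders):
+...............
+ban webserver node-3
+...............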
+
+[[cmdhelp_resource_cleanup,cleanup resource status]]
+==== `cleanup`
+
+Clean up the resource status. This is typically done after the
+resource has temporarily failed. If a node is omitted, the cleanup
+is performed on all nodes. If there are many nodes, the command may
+take a while.
+
++(Pacemaker 1.1.14)+ Pass +force+ to clean up the resource itself;
+otherwise, the cleanup command applies to the parent resource (if
+any).
+
+Usage:
+...............
+cleanup <rsc> [<node>] [force]
+...............
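+
+Example (resource and node names are placeholders):
+...............
+cleanup fs_0 node-2
+cleanup fs_0 force
+...............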
+
+[[cmdhelp_resource_clear,Clear any relocation constraint]]
+==== `clear` (`unmove`, `unmigrate`, `unban`)
+
+Remove any relocation constraint created by
+the `move`, `migrate` or `ban` command.
+
+Usage:
+...............
+clear <rsc>
+unmigrate <rsc>
+unban <rsc>
+...............
+
+[[cmdhelp_resource_constraints,Show constraints affecting a resource]]
+==== `constraints`
+
+Display the location and colocation constraints affecting the
+resource.
+
+Usage:
+................
+constraints <rsc>
+................
+
+[[cmdhelp_resource_demote,demote a master-slave resource]]
+==== `demote`
+
+Demote a master-slave resource using the `target-role`
+attribute.
+
+Usage:
+...............
+demote <rsc>
+...............
+
+[[cmdhelp_resource_failcount,manage failcounts]]
+==== `failcount`
+
+Show/edit/delete the failcount of a resource.
+
+Usage:
+...............
+failcount <rsc> set <node> <value>
+failcount <rsc> delete <node>
+failcount <rsc> show <node>
+...............
+Example:
+...............
+failcount fs_0 delete node2
+...............
+
+[[cmdhelp_resource_locate,show the location of resources]]
+==== `locate`
+
+Show the current location of one or more resources.
+
+Usage:
+...............
+locate [<rsc> ...]
+...............
+
+[[cmdhelp_resource_maintenance,Enable/disable per-resource maintenance mode]]
+==== `maintenance`
+
+Enables or disables the per-resource maintenance mode. When this mode
+is enabled, no monitor operations will be triggered for the resource.
+The `maintenance` attribute conflicts with the `is-managed` attribute.
+When setting the `maintenance` attribute, the user is prompted to
+remove the `is-managed` attribute if it exists.
+
+Usage:
+..................
+maintenance <resource> [on|off|true|false]
+..................
+
+Example:
+..................
+maintenance rsc1
+maintenance rsc2 off
+..................
+
+[[cmdhelp_resource_manage,put a resource into managed mode]]
+==== `manage`
+
+Manage a resource using the `is-managed` attribute. If there
+are multiple meta attribute sets, the attribute is set in all of
+them. If the resource is a clone, all `is-managed` attributes are
+removed from the children resources.
+The `is-managed` attribute conflicts with the `maintenance`
+attribute. When setting the `is-managed` attribute, the user is
+prompted to remove the `maintenance` attribute if it exists.
+
+For details on group management see <<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+manage <rsc>
+...............
+
+[[cmdhelp_resource_meta,manage a meta attribute]]
+==== `meta`
+
+Show/edit/delete a meta attribute of a resource. Currently, all
+meta attributes of a resource may be managed with other commands
+such as `resource stop`.
+
+Usage:
+...............
+meta <rsc> set <attr> <value>
+meta <rsc> delete <attr>
+meta <rsc> show <attr>
+...............
+Example:
+...............
+meta ip_0 set target-role stopped
+...............
+
+[[cmdhelp_resource_move,Move a resource to another node]]
+==== `move` (`migrate`)
+
+Move a resource away from its current location.
+
+If the destination node is left out, the resource is migrated by
+creating a constraint which prevents it from running on the current
+node. For this type of constraint to be created, the +force+ argument
+is required.
+
+A lifetime may be given for the constraint. Once it expires, the
+location constraint will no longer be active.
+
+Usage:
+...............
+move <rsc> [<node>] [<lifetime>] [force]
+...............
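+
+Example (resource and node names are placeholders; the lifetime, if
+given, is an ISO8601 duration):
+...............
+move webserver node-2
+move webserver node-2 PT1H
+move webserver force
+...............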
+
+[[cmdhelp_resource_operations,Show active resource operations]]
+==== `operations`
+
+Show active operations, optionally filtered by resource and node.
+
+Usage:
+................
+operations [<rsc>] [<node>]
+................
+
+[[cmdhelp_resource_param,manage a parameter of a resource]]
+==== `param`
+
+Show/edit/delete a parameter of a resource.
+
+Usage:
+...............
+param <rsc> set <param> <value>
+param <rsc> delete <param>
+param <rsc> show <param>
+...............
+Example:
+...............
+param ip_0 show ip
+...............
+
+[[cmdhelp_resource_promote,promote a master-slave resource]]
+==== `promote`
+
+Promote a master-slave resource using the `target-role`
+attribute.
+
+Usage:
+...............
+promote <rsc>
+...............
+
+[[cmdhelp_resource_refresh,refresh CIB from the LRM status]]
+==== `refresh`
+
+Refresh CIB from the LRM status.
+
+.Note
+****************************
+`refresh` has been deprecated and is now
+an alias for `cleanup`.
+****************************
+
+Usage:
+...............
+refresh [<node>]
+...............
+
+[[cmdhelp_resource_reprobe,probe for resources not started by the CRM]]
+==== `reprobe`
+
+Probe for resources not started by the CRM.
+
+.Note
+****************************
+`reprobe` has been deprecated and is now
+an alias for `cleanup`.
+****************************
+
+Usage:
+...............
+reprobe [<node>]
+...............
+
+[[cmdhelp_resource_restart,restart resources]]
+==== `restart`
+
+Restart one or more resources. This is essentially a shortcut for
+a resource stop followed by a start. The shell first waits for the
+stop to finish, that is, for all resources to really stop, and
+only then orders the start action. Since this command entails a
+whole set of operations, informational messages are printed to
+show progress.
+
+For details on group management see
+<<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+restart <rsc> [<rsc> ...]
+...............
+Example:
+...............
+# crm resource restart g_webserver
+INFO: ordering g_webserver to stop
+waiting for stop to finish .... done
+INFO: ordering g_webserver to start
+#
+...............
+
+[[cmdhelp_resource_scores,Display resource scores]]
+==== `scores`
+
+Display the allocation scores for all resources.
+
+Usage:
+................
+scores
+................
+
+[[cmdhelp_resource_secret,manage sensitive parameters]]
+==== `secret`
+
+Sensitive parameters can be kept in local files rather than in the
+CIB in order to prevent accidental data exposure. Use the `secret`
+command to manage such parameters. `stash` and `unstash` move the
+value from the CIB and back to the CIB respectively. The `set`
+subcommand sets the parameter to the provided value. `delete`
+removes the parameter completely. `show` displays the value of
+the parameter from the local file. Use `check` to verify if the
+local file content is valid.
+
+Usage:
+...............
+secret <rsc> set <param> <value>
+secret <rsc> stash <param>
+secret <rsc> unstash <param>
+secret <rsc> delete <param>
+secret <rsc> show <param>
+secret <rsc> check <param>
+...............
+Example:
+...............
+secret fence_1 show password
+secret fence_1 stash password
+secret fence_1 set password secret_value
+...............
+
+[[cmdhelp_resource_start,start resources]]
+==== `start`
+
+Start one or more resources by setting the `target-role` attribute. If
+there are multiple meta attribute sets, the attribute is set in all
+of them. If the resource is a clone, all `target-role` attributes are
+removed from the children resources.
+
+For details on group management see
+<<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+start <rsc> [<rsc> ...]
+...............
+
+[[cmdhelp_resource_status,show status of resources]]
+==== `status` (`show`, `list`)
+
+Print resource status. More than one resource can be shown at once. If
+the resource parameter is left out, the status of all resources is
+printed.
+
+Usage:
+...............
+status [<rsc> ...]
+...............
+
+[[cmdhelp_resource_stop,stop resources]]
+==== `stop`
+
+Stop one or more resources using the `target-role` attribute. If there
+are multiple meta attribute sets, the attribute is set in all of
+them. If the resource is a clone, all `target-role` attributes are
+removed from the children resources.
+
+For details on group management see
+<<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+stop <rsc> [<rsc> ...]
+...............
+
+[[cmdhelp_resource_trace,start RA tracing]]
+==== `trace`
+
+Start tracing RA for the given operation. The trace files are
+stored in `$HA_VARLIB/trace_ra`. If the operation to be traced is
+`monitor`, note that the number of trace files can grow very
+quickly.
+
+If no operation name is given, crmsh will attempt to trace all
+operations for the RA. This includes any configured operations, start
+and stop as well as promote/demote for multistate resources.
+
+To trace the probe operation which exists for all resources, either
+set a trace for `monitor` with interval `0`, or use `probe` as the
+operation name.
+
+Usage:
+...............
+trace <rsc> [<op> [<interval>] ]
+...............
+Example:
+...............
+trace fs start
+trace webserver
+trace webserver probe
+trace fs monitor 0
+...............
+
+[[cmdhelp_resource_unmanage,put a resource into unmanaged mode]]
+==== `unmanage`
+
+Unmanage a resource using the `is-managed` attribute. If there
+are multiple meta attribute sets, the attribute is set in all of
+them. If the resource is a clone, all `is-managed` attributes are
+removed from the children resources.
+
+For details on group management see <<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+unmanage <rsc>
+...............
+
+[[cmdhelp_resource_untrace,stop RA tracing]]
+==== `untrace`
+
+Stop tracing RA for the given operation. If no operation name is
+given, crmsh will attempt to stop tracing all operations of the
+resource.
+
+Usage:
+...............
+untrace <rsc> [<op> [<interval>] ]
+...............
+Example:
+...............
+untrace fs start
+untrace webserver
+...............
+
+[[cmdhelp_resource_utilization,manage a utilization attribute]]
+==== `utilization`
+
+Show/edit/delete a utilization attribute of a resource. These
+attributes describe hardware requirements. By setting the
+`placement-strategy` cluster property appropriately, it is
+possible then to distribute resources based on resource
+requirements and node size. See also <<cmdhelp_node_utilization,node utilization attributes>>.
+
+Usage:
+...............
+utilization <rsc> set <attr> <value>
+utilization <rsc> delete <attr>
+utilization <rsc> show <attr>
+...............
+Example:
+...............
+utilization xen1 set memory 4096
+...............
+
+[[cmdhelp_node,Node management]]
+=== `node` - Node management
+
+Node management and status commands.
+
+[[cmdhelp_node_attribute,manage attributes]]
+==== `attribute`
+
+Edit node attributes. This kind of attribute should refer to
+relatively static properties, such as memory size.
+
+Usage:
+...............
+attribute <node> set <attr> <value>
+attribute <node> delete <attr>
+attribute <node> show <attr>
+...............
+Example:
+...............
+attribute node_1 set memory_size 4096
+...............
+
+[[cmdhelp_node_clearstate,Clear node state]]
+==== `clearstate`
+
+Resets and clears the state of the specified node. This node is
+afterwards assumed clean and offline. This command can be used to
+manually confirm that a node has been fenced (e.g., powered off).
+
+Be careful! This can cause data corruption if you confirm that a node is
+down that is, in fact, not cleanly down - the cluster will proceed as if
+the fence had succeeded, possibly starting resources multiple times.
+
+Usage:
+...............
+clearstate <node>
+...............
+
+[[cmdhelp_node_delete,delete node]]
+==== `delete`
+
+Delete a node. This command will remove the node from the CIB
+and, in case the cluster stack is running, use the appropriate
+program (`crm_node` or `hb_delnode`) to remove the node from the
+membership.
+
+If the node is still listed as active and a member of our
+partition, we refuse to remove it. With the global force option
+(`-F`) we will try to delete the node anyway.
+
+Usage:
+...............
+delete <node>
+...............
+
+[[cmdhelp_node_fence,fence node]]
+==== `fence`
+
+Make CRM fence a node. This functionality depends on stonith
+resources capable of fencing the specified node. If no such stonith
+resources are available, fencing will not happen.
+
+Usage:
+...............
+fence <node>
+...............
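+
+Example (assuming a stonith resource capable of fencing +node-3+
+is configured):
+...............
+fence node-3
+...............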
+
+[[cmdhelp_node_maintenance,put node into maintenance mode]]
+==== `maintenance`
+
+Set the node status to maintenance. This is equivalent to the
+cluster-wide `maintenance-mode` property but puts just one node
+into the maintenance mode. If there are resources in maintenance
+mode on the node, the user will be prompted to remove the
+maintenance property from them.
+
+The node parameter defaults to the node where the command is run.
+
+Usage:
+...............
+maintenance [<node>]
+...............
+
+[[cmdhelp_node_online,set node online]]
+==== `online`
+
+Set a node to online status.
+
+The node parameter defaults to the node where the command is run.
+
+Usage:
+...............
+online [<node>]
+...............
+
+[[cmdhelp_node_ready,put node into ready mode]]
+==== `ready`
+
+Set the node's maintenance status to `off`. The node should now
+again be fully operational and capable of running resource
+operations.
+
+The node parameter defaults to the node where the command is run.
+
+Usage:
+...............
+ready [<node>]
+...............
+
+[[cmdhelp_node_server,show node hostname or server address]]
+==== `server`
+
+Remote nodes may have a configured server address which should
+be used when contacting the node. This command prints the
+server address if configured, else the node name.
+
+If no parameter is given, the addresses or names for all nodes
+are printed.
+
+Usage:
+...............
+server [<node> ...]
+...............
+
+[[cmdhelp_node_show,show node]]
+==== `show`
+
+Show a node definition. If the node parameter is omitted then all
+nodes are shown.
+
+Usage:
+...............
+show [<node>]
+...............
+
+[[cmdhelp_node_standby,put node into standby]]
+==== `standby`
+
+Set a node to standby status. The node parameter defaults to the
+node where the command is run.
+
+Additionally, you may specify a lifetime for the standby: if set to
+`reboot`, the node will be back online once it reboots, while
+`forever` will keep the node in standby after reboot. The lifetime
+defaults to `forever`.
+
+Usage:
+...............
+standby [<node>] [<lifetime>]
+
+lifetime :: reboot | forever
+...............
+
+Example:
+...............
+standby bob reboot
+...............
+
+
+[[cmdhelp_node_status,show nodes' status as XML]]
+==== `status`
+
+Show nodes' status as XML. If the node parameter is omitted then
+all nodes are shown.
+
+Usage:
+...............
+status [<node>]
+...............
+
+[[cmdhelp_node_status-attr,manage status attributes]]
+==== `status-attr`
+
+Edit node attributes which are in the CIB status section, i.e.
+attributes which hold properties of a more volatile nature. One
+typical example is the attribute generated by the `pingd` utility.
+
+Usage:
+...............
+status-attr <node> set <attr> <value>
+status-attr <node> delete <attr>
+status-attr <node> show <attr>
+...............
+Example:
+...............
+status-attr node_1 show pingd
+...............
+
+[[cmdhelp_node_utilization,manage utilization attributes]]
+==== `utilization`
+
+Edit node utilization attributes. These attributes describe
+hardware characteristics as integer numbers such as memory size
+or the number of CPUs. By setting the `placement-strategy`
+cluster property appropriately, it is possible then to distribute
+resources based on resource requirements and node size. See also
+<<cmdhelp_resource_utilization,resource utilization attributes>>.
+
+Usage:
+...............
+utilization <node> set <attr> <value>
+utilization <node> delete <attr>
+utilization <node> show <attr>
+...............
+Examples:
+...............
+utilization node_1 set memory 16384
+utilization node_1 show cpu
+...............
+
+[[cmdhelp_site,GEO clustering site support]]
+=== `site` - GEO clustering site support
+
+A cluster may consist of two or more subclusters in different and
+distant locations. This set of commands supports such setups.
+
+[[cmdhelp_site_ticket,manage site tickets]]
+==== `ticket`
+
+Tickets are cluster-wide attributes. They can be managed at the
+site where this command is executed.
+
+It is then possible to constrain resources depending on the
+ticket availability (see the <<cmdhelp_configure_rsc_ticket,`rsc_ticket`>> command
+for more details).
+
+Usage:
+...............
+ticket {grant|revoke|standby|activate|show|time|delete} <ticket>
+...............
+Example:
+...............
+ticket grant ticket1
+...............
+
+[[cmdhelp_options,User preferences]]
+=== `options` - User preferences
+
+The user may set various options for the crm shell itself.
+
+[[cmdhelp_options_add-quotes,add quotes around parameters containing spaces]]
+==== `add-quotes`
+
+The shell (as in `/bin/sh`) parser strips quotes from the command
+line. This may sometimes make it really difficult to type values
+which contain white space. One typical example is the `configure
+filter` command. The crm shell will supply extra quotes around
+arguments which contain white space. The default is `yes`.
+
+.Note on quotes use
+****************************
+Automatic quoting of arguments was introduced in version 1.2.2
+and is technically a regression. Being a regression is the only
+reason the `add-quotes` option exists. If you have custom shell
+scripts which would break, just set the `add-quotes` option to
+`no`.
+
+For instance, with adding quotes enabled, it is possible to do
+the following:
+...............
+# crm configure primitive d1 Dummy \
+ meta description="some description here"
+# crm configure filter 'sed "s/hostlist=./&node-c /"' fencing
+...............
+****************************
+
+[[cmdhelp_options_check-frequency,when to perform semantic check]]
+==== `check-frequency`
+
+A semantic check of the CIB, or of the elements modified or created,
+may be done on every configuration change (`always`), when verifying
+(`on-verify`), or `never`. By default it is set to `always`.
+Experts may want to change the setting to `on-verify`.
+
+The checks require that resource agents are present. If they are
+not installed at configuration time, set this preference to
+`never`.
+
+See <<topics_Features_Checks,Configuration semantic checks>> for more details.
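+
+Example (run the semantic checks only when verifying):
+...............
+check-frequency on-verify
+...............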
+
+[[cmdhelp_options_check-mode,how to treat semantic errors]]
+==== `check-mode`
+
+A semantic check of the CIB, or of the elements modified or created,
+may be done in `strict` mode or in `relaxed` mode. In the former,
+certain problems are treated as configuration errors. In `relaxed`
+mode, all are treated as warnings. The default is `strict`.
+
+See <<topics_Features_Checks,Configuration semantic checks>> for more details.
+
+[[cmdhelp_options_colorscheme,set colors for output]]
+==== `colorscheme`
+
+With `output` set to `color`, a comma-separated list of colors
+from this option is used to emphasize:
+
+- keywords
+- object ids
+- attribute names
+- attribute values
+- scores
+- resource references
+
+`crm` can show colors only if curses support for Python is
+installed (usually provided by the `python-curses` package). The
+colors are whatever is available in your terminal. Use `normal`
+if you want to keep the default foreground color.
+
+This user preference defaults to
+`yellow,normal,cyan,red,green,magenta` which is good for
+terminals with dark background. You may want to change the color
+scheme and save it in the preferences file for other color
+setups.
+
+Example:
+...............
+colorscheme yellow,normal,blue,red,green,magenta
+...............
+
+[[cmdhelp_options_editor,set preferred editor program]]
+==== `editor`
+
+The `edit` command invokes an editor. Use this to specify your
+preferred editor program. If not set, it will default to either
+the value of the `EDITOR` environment variable or to one of the
+standard UNIX editors (`vi`,`emacs`,`nano`).
+
+Usage:
+...............
+editor program
+...............
+Example:
+...............
+editor vim
+...............
+
+[[cmdhelp_options_manage-children,how to handle children resource attributes]]
+==== `manage-children`
+
+Some resource management commands, such as `resource stop`, may not
+always produce the desired result when the target resource is a
+group. Each element, the group and the primitive members, can have
+a meta attribute, and those attributes may end up with conflicting
+values. Consider the following construct:
+...............
+crm(live)# configure show svc fs virtual-ip
+primitive fs Filesystem \
+ params device="/dev/drbd0" directory="/srv/nfs" fstype=ext3 \
+ op monitor interval=10s \
+ meta target-role=Started
+primitive virtual-ip IPaddr2 \
+ params ip=10.2.13.110 iflabel=1 \
+ op monitor interval=10s \
+ op start interval=0 \
+ meta target-role=Started
+group svc fs virtual-ip \
+ meta target-role=Stopped
+...............
+
+Even though the element +svc+ should be stopped, the group is
+actually running because all its members have the +target-role+
+set to +Started+:
+...............
+crm(live)# resource show svc
+resource svc is running on: xen-f
+...............
+
+Hence, if the user invokes +resource stop svc+, the intention is
+not clear. This preference gives the user an opportunity to
+better control what happens if attributes of group members have
+values which are in conflict with the same attribute of the group
+itself.
+
+Possible values are +ask+ (the default), +always+, and +never+.
+If set to +always+, the crm shell removes all children attributes
+which have values different from the parent. If set to +never+,
+all children attributes are left intact. Finally, if set to
++ask+, the user will be asked for each member what is to be done.
+
+[[cmdhelp_options_output,set output type]]
+==== `output`
+
+`crm` can adorn configurations in two ways: in color (similar to,
+for instance, the `ls --color` command) and by showing keywords in
+upper case. Possible values are `plain`, `color-always`, `color`,
+and `uppercase`. It is possible to combine `uppercase` with one
+of the color values in order to get an upper case Christmas tree.
+Just set this option to `color,uppercase` or `color-always,uppercase`.
+In case you need color codes in pipes, `color-always` forces color
+codes even when the terminal is not a tty (just like `ls
+--color=always`).
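+
+Example:
+...............
+output color,uppercase
+...............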
+
+[[cmdhelp_options_pager,set preferred pager program]]
+==== `pager`
+
+The `view` command displays text through a pager. Use this to
+specify your preferred pager program. If not set, it will default
+to either the value of the `PAGER` environment variable or to one
+of the standard UNIX system pagers (`less`,`more`,`pg`).
+
+[[cmdhelp_options_reset,reset user preferences to factory defaults]]
+==== `reset`
+
+This command resets all user options to the defaults. If used as
+a single-shot command, the rc file (+$HOME/.config/crm/rc+) is
+reset to the defaults too.
+
+[[cmdhelp_options_save,save the user preferences to the rc file]]
+==== `save`
+
+Save current settings to the rc file (+$HOME/.config/crm/rc+). On
+further `crm` runs, the rc file is automatically read and parsed.
+
+[[cmdhelp_options_set,Set the value of a given option]]
+==== `set`
+
+Sets the value of an option. Takes the fully qualified
+name of the option as argument, as displayed by +show all+.
+
+The modified option value is stored in the user-local
+configuration file, usually found in +~/.config/crm/crm.conf+.
+
+Usage:
+........
+set <option> <value>
+........
+
+Example:
+........
+set color.warn "magenta bold"
+set editor nano
+........
+
+[[cmdhelp_options_show,show current user preference]]
+==== `show`
+
+Display all current settings.
+
+Given an option name as argument, `show` will display only the value
+of that argument.
+
+Given +all+ as argument, `show` displays all available user options.
+
+Usage:
+........
+show [all|<option>]
+........
+
+Example:
+........
+show
+show skill-level
+show all
+........
+
+[[cmdhelp_options_skill-level,set skill level]]
+==== `skill-level`
+
+Based on the skill-level setting, the user is allowed to use only
+a subset of commands. There are three levels: operator,
+administrator, and expert. The operator level allows only
+commands at the `resource` and `node` levels, but not editing
+or deleting resources. The administrator may do that and may also
+configure the cluster at the `configure` level and manage the
+shadow CIBs. The expert may do all.
+
+Usage:
+...............
+skill-level <level>
+
+level :: operator | administrator | expert
+...............
+
+.Note on security
+****************************
+The `skill-level` option is advisory only. There is nothing
+stopping users from changing their skill level (see
+<<topics_Features_Security,Access Control Lists (ACL)>> on how to enforce
+access control).
+****************************
+
+[[cmdhelp_options_sort-elements,sort CIB elements]]
+==== `sort-elements`
+
+`crm` by default sorts CIB elements. If you want them to appear in
+the order they were created, set this option to `no`.
+
+Usage:
+...............
+sort-elements {yes|no}
+...............
+Example:
+...............
+sort-elements no
+...............
+
+[[cmdhelp_options_user,set the cluster user]]
+==== `user`
+
+Sufficient privileges are necessary in order to manage a
+cluster: programs such as `crm_verify` or `crm_resource` and,
+ultimately, `cibadmin` have to be run either as `root` or as the
+CRM owner user (typically `hacluster`). You don't have to worry
+about that if you run `crm` as `root`. A more secure way is to
+run the program with your usual privileges, set this option to
+the appropriate user (such as `hacluster`), and set up the
+`sudoers` file.
+
+Usage:
+...............
+user system-user
+...............
+Example:
+...............
+user hacluster
+...............
+
+[[cmdhelp_options_wait,synchronous operation]]
+==== `wait`
+
+In normal operation, `crm` runs a command and gets back
+immediately to process other commands or get input from the user.
+With this option set to `yes` it will wait for the started
+transition to finish. In interactive mode dots are printed to
+indicate progress.
+
+Usage:
+...............
+wait {yes|no}
+...............
+Example:
+...............
+wait yes
+...............
+
+[[cmdhelp_configure,CIB configuration]]
+=== `configure` - CIB configuration
+
+This level enables all CIB object definition commands.
+
+The configuration may be logically divided into four parts:
+nodes, resources, constraints, and (cluster) properties and
+attributes. Each of these commands supports one or more basic CIB
+objects.
+
+Nodes and attributes describing nodes are managed using the
+`node` command.
+
+Commands for resources are:
+
+- `primitive`
+- `monitor`
+- `group`
+- `clone`
+- `ms`/`master` (master-slave)
+
+In order to streamline large configurations, it is possible to
+define a template which can later be referenced in primitives:
+
+- `rsc_template`
+
+In that case the primitive inherits all attributes defined in the
+template.
+
+There are three types of constraints:
+
+- `location`
+- `colocation`
+- `order`
+
+It is possible to define fencing order (stonith resource
+priorities):
+
+- `fencing_topology`
+
+Finally, there are the cluster properties, resource meta
+attributes defaults, and operations defaults. All are just a set
+of attributes. These attributes are managed by the following
+commands:
+
+- `property`
+- `rsc_defaults`
+- `op_defaults`
+
+In addition to the cluster configuration, the Access Control
+Lists (ACL) can be setup to allow access to parts of the CIB for
+users other than +root+ and +hacluster+. The following commands
+manage ACL:
+
+- `user`
+- `role`
+
+In Pacemaker 1.1.12 and up, this command replaces the `user` command
+for handling ACLs:
+
+- `acl_target`
+
+The changes are applied to the current CIB only on ending the
+configuration session or using the `commit` command.
+
+Comments start with +#+ in the first line. The comments are tied
+to the element which follows. If the element moves, its comments
+will follow.
+
+[[cmdhelp_configure_acl_target,Define target access rights]]
+==== `acl_target`
+
+Defines an ACL target.
+
+Usage:
+................
+acl_target <tid> [<role> ...]
+................
+Example:
+................
+acl_target joe resource_admin constraint_editor
+................
+
+[[cmdhelp_configure_alert,Event-driven alerts]]
+==== `alert`
+
+.Version note
+****************************
+This feature is only available
+in Pacemaker 1.1.15+.
+****************************
+
+Event-driven alerts enable calling scripts whenever interesting
+events occur in the cluster (nodes joining or leaving, resources
+starting or stopping, etc.).
+
+The +path+ is an arbitrary file path to an alert script. Existing
+external scripts used with ClusterMon resources can be used as alert
+scripts, since the interface is compatible.
+
+Each alert may have a number of recipients configured. These will be
+passed to the script as arguments. The first recipient will also be
+passed as the +CRM_alert_recipient+ environment variable, for
+compatibility with existing scripts that only support one recipient.
+
+The available meta attributes are +timeout+ (default 30s) and
++timestamp-format+ (default `"%H:%M:%S.%06N"`).
+
+Some configurations may require each recipient to be delimited by
+braces, to avoid ambiguity. In the example +alert-2+ below, the meta
+attribute for `timeout` is defined after the recipient, so the
+braces are used to ensure that the meta attribute is set for the
+alert and not just the recipient. This can be avoided by setting any
+alert attributes before defining the recipients.
+
+Usage:
+...............
+alert <id> <path> \
+ [attributes <nvpair> ...] \
+ [meta <nvpair> ...] \
+ [to [{] <recipient>
+ [attributes <nvpair> ...] \
+ [meta <nvpair> ...] [}] \
+ ...]
+...............
+
+Example:
+...............
+alert alert-1 /srv/pacemaker/pcmk_alert_sample.sh \
+ to /var/log/cluster-alerts.log
+
+alert alert-2 /srv/pacemaker/example_alert.sh \
+ meta timeout=60s \
+ to { /var/log/cluster-alerts.log }
+...............
+
+[[cmdhelp_configure_cib,CIB shadow management]]
+==== `cib`
+
+This level is for management of shadow CIBs. It is available at
+the `configure` level to enable saving intermediate changes to a
+shadow CIB instead of to the live cluster. This short excerpt
+shows how:
+...............
+crm(live)configure# cib new test-2
+INFO: test-2 shadow CIB created
+crm(test-2)configure# commit
+...............
+Note how the current CIB in the prompt changed from +live+ to
++test-2+ after issuing the `cib new` command. See also the
+<<cmdhelp_cib,CIB shadow management>> section for more information.
+
+[[cmdhelp_configure_cibstatus,CIB status management and editing]]
+==== `cibstatus`
+
+Enter the level for editing and managing the CIB status section.
+See the <<cmdhelp_cibstatus,CIB status management section>>.
+
+[[cmdhelp_configure_clone,define a clone]]
+==== `clone`
+
+The `clone` command creates a resource clone. It may contain a
+single primitive resource or one group of resources.
+
+Usage:
+...............
+clone <name> <rsc>
+ [description=<description>]
+ [meta <attr_list>]
+ [params <attr_list>]
+
+attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+...............
+Example:
+...............
+clone cl_fence apc_1 \
+ meta clone-node-max=1 globally-unique=false
+...............
+
+[[cmdhelp_configure_colocation,colocate resources]]
+==== `colocation` (`collocation`)
+
+This constraint expresses the placement relation between two
+or more resources. If there are more than two resources, then the
+constraint is called a resource set.
+
+The score is used to indicate the priority of the constraint. A
+positive score indicates that the resources should run on the same
+node. A negative score indicates that they should not run on the
+same node. Values of positive or negative +infinity+ indicate a
+mandatory constraint.
+
+In the two resource form, the cluster will place +<with-rsc>+ first,
+and then decide where to put the +<rsc>+ resource.
+
+Collocation resource sets have an extra attribute (+sequential+)
+to allow for sets of resources which don't depend on each other
+in terms of state. The shell syntax for such sets is to put
+resources in parentheses.
+
+Sets cannot be nested.
+
+The optional +node-attribute+ can be used to colocate resources on a
+set of nodes and not necessarily on the same node. For example, by
+setting a node attribute +color+ on all nodes and setting the
++node-attribute+ value to +color+ as well, the colocated resources
+will be placed on any node that has the same color.
+
+For more details on how to configure resource sets, see
+<<topics_Features_Resourcesets,`Syntax: Resource sets`>>.
+
+Usage:
+...............
+colocation <id> <score>: <rsc>[:<role>] <with-rsc>[:<role>]
+ [node-attribute=<node_attr>]
+
+colocation <id> <score>: <resource_sets>
+ [node-attribute=<node_attr>]
+
+resource_sets :: <resource_set> [<resource_set> ...]
+
+resource_set :: ["("|"["] <rsc>[:<role>] [<rsc>[:<role>] ...] \
+ [<attributes>] [")"|"]"]
+
+attributes :: [require-all=(true|false)] [sequential=(true|false)]
+
+...............
+Example:
+...............
+colocation never_put_apache_with_dummy -inf: apache dummy
+colocation c1 inf: A ( B C )
+...............
+
+[[cmdhelp_configure_commit,commit the changes to the CIB]]
+==== `commit`
+
+Commit the current configuration to the CIB in use. As noted
+elsewhere, commands in a configure session don't have immediate
+effect on the CIB. All changes are applied at one point in time,
+either using `commit` or when the user leaves the configure
+level. In case the CIB in use changed in the meantime, presumably
+by somebody else, the crm shell will refuse to apply the changes.
+
+If you know that it's fine to still apply them, add +force+ to the
+command line.
+
+To disable CIB patching and apply the changes by replacing the CIB
+completely, add +replace+ to the command line. Note that this can lead
+to previous changes being overwritten if some other process
+concurrently modifies the CIB.
+
+Usage:
+...............
+commit [force] [replace]
+...............
+
+[[cmdhelp_configure_default-timeouts,set timeouts for operations to minimums from the meta-data]]
+==== `default-timeouts`
+
+This command takes the timeouts from the actions section of the
+resource agent meta-data and sets them for the operations of the
+primitive.
+
+Usage:
+...............
+default-timeouts <id> [<id>...]
+...............
+
+.Note on `default-timeouts`
+****************************
+The use of this command is discouraged in favor of manually
+determining the best timeouts required for the particular
+configuration. Relying on the resource agent to supply appropriate
+timeouts can cause the resource to fail at the worst possible moment.
+
+Appropriate timeouts for resource actions are context-sensitive, and
+should be carefully considered with the whole configuration in mind.
+****************************
+
+[[cmdhelp_configure_delete,delete CIB objects]]
+==== `delete`
+
+Delete one or more objects. If an object to be deleted belongs to
+a container object, such as a group, and it is the only resource
+in that container, then the container is deleted as well. Any
+related constraints are also removed.
+
+If the object is a started resource, it will not be deleted unless the
++--force+ flag is passed to the command, or the +force+ option is set.
+
+Usage:
+...............
+delete [--force] <id> [<id>...]
+...............
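+
+Example (the object ids are placeholders):
+...............
+delete d1 d2
+delete --force web-group
+...............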
+
+[[cmdhelp_configure_edit,edit CIB objects]]
+==== `edit`
+
+This command invokes the editor with the object description. As
+with the `show` command, the user may choose to edit all objects
+or a set of objects.
+
+If the user insists, he or she may edit the XML representation of
+the object. If you do that, don't modify any id attributes.
+
+Usage:
+...............
+edit [xml] [<id> ...]
+edit [xml] changed
+...............
+
+.Note on renaming element ids
+****************************
+The edit command sometimes cannot properly handle modifying
+element ids, in particular for elements which belong to group or
+ms resources. Group and ms resources themselves also cannot be
+renamed. Please use the `rename` command instead.
+****************************
+
+[[cmdhelp_configure_erase,erase the CIB]]
+==== `erase`
+
+The `erase` command clears all configuration, apart from nodes. To
+remove nodes as well, specify the additional keyword `nodes`.
+
+Note that removing nodes from the live cluster may have some
+strange/interesting/unwelcome effects.
+
+Usage:
+...............
+erase [nodes]
+...............
+
+[[cmdhelp_configure_fencing_topology,node fencing order]]
+==== `fencing_topology`
+
+If multiple fencing (stonith) devices capable of fencing a node
+are available, their order may be specified by +fencing_topology+.
+The order is specified per node.
+
+Stonith resources can be separated by +,+ in which case all of
+them need to succeed. If they fail, the next stonith resource (or
+set of resources) is used. In other words, use a comma to separate
+resources which all need to succeed, and whitespace for serial
+order. Whitespace around the comma is not allowed.
+
+If the node is left out, the order is used for all nodes.
+That should reduce the configuration size in some stonith setups.
+
+From Pacemaker version 1.1.14, it is possible to use a node attribute
+as the +target+ in a fencing topology. The syntax for this usage is
+described below.
+
+From Pacemaker version 1.1.14, it is also possible to use regular
+expression patterns as the +target+ in a fencing topology. The configured
+fencing sequence then applies to all devices matching the pattern.
+
+Usage:
+...............
+fencing_topology <stonith_resources> [<stonith_resources> ...]
+fencing_topology <fencing_order> [<fencing_order> ...]
+
+fencing_order :: <target> <stonith_resources> [<stonith_resources> ...]
+
+stonith_resources :: <rsc>[,<rsc>...]
+target :: <node>: | attr:<node-attribute>=<value> | pattern:<pattern>
+...............
+Example:
+...............
+# Only kill the power if poison-pill fails
+fencing_topology poison-pill power
+
+# As above for node-a, but a different strategy for node-b
+fencing_topology \
+ node-a: poison-pill power \
+ node-b: ipmi serial
+
+# Fencing anything on rack 1 requires fencing via both APC 1 and 2,
+# to defeat the redundancy provided by two separate UPS units.
+fencing_topology attr:rack=1 apc01,apc02
+
+# Fencing for all machines named green.* is done using the pear
+# fencing device first, while all machines named red.* are fenced
+# using the apple fencing device first.
+fencing_topology \
+ pattern:green.* pear apple \
+ pattern:red.* apple pear
+...............
+
+[[cmdhelp_configure_filter,filter CIB objects]]
+==== `filter`
+
+This command filters the given CIB elements through an external
+program. The program should accept input on `stdin` and send
+output to `stdout` (the standard UNIX filter conventions). As
+with the `show` command, the user may choose to filter all or
+just a subset of elements.
+
+It is possible to filter the XML representation of objects, but
+that is probably not as useful as the configuration language. The
+presentation is somewhat different from what would be displayed
+by the `show` command: each element is shown on a single line,
+i.e. there are no backslashes and no other embellishments.
+
+Don't forget to put quotes around the filter if it contains
+spaces.
+
+Usage:
+...............
+filter <prog> [xml] [<id> ...]
+filter <prog> [xml] changed
+...............
+Examples:
+...............
+filter "sed '/^primitive/s/target-role=[^ ]*//'"
+# crm configure filter "sed '/^primitive/s/target-role=[^ ]*//'"
+crm configure <<END
+ filter "sed '/threshold=\"1\"/s/=\"1\"/=\"0\"/g'"
+END
+...............
+
+.Note on quotation marks
+**************************
+Filter commands which feature a blend of quotation marks can be
+difficult to get right, especially when used directly from bash, since
+bash does its own quotation parsing. In these cases, it can be easier
+to supply the filter command as standard input. See the last example
+above.
+**************************
+
+[[cmdhelp_configure_get_property,Get property value]]
+==== `get-property`
+
+Show the value of the given property. If the value is not set, the
+command will print the default value for the property, if known.
+
+If no property name is passed to the command, the list of known
+cluster properties is printed.
+
+If the property is set multiple times, for example using multiple
+property sets with different rule expressions, the output of this
+command is undefined.
+
+Pass the argument +-t+ or +--true+ to `get-property` to translate
+the argument value into +true+ or +false+. If the value is not
+set, the command will print +false+.
+
+Usage:
+...............
+get-property [-t|--true] [<name>]
+...............
+
+Example:
+...............
+get-property stonith-enabled
+get-property -t maintenance-mode
+...............
+
+[[cmdhelp_configure_graph,generate a directed graph]]
+==== `graph`
+
+Create a graphviz graphical layout from the current cluster
+configuration.
+
+Currently, only `dot` (directed graph) is supported. It is
+essentially a visualization of resource ordering.
+
+The graph may be saved to a file which can be used as source for
+various graphviz tools (by default it is displayed in the user's
+X11 session). Optionally, by specifying the format, one can also
+produce an image instead.
+
+For more or different graphviz attributes, it is possible to save
+the default set of attributes to an ini file. If this file exists,
+it will always override the built-in settings. The +exportsettings+
+subcommand also prints the location of the ini file.
+
+Usage:
+...............
+graph [<gtype> [<file> [<img_format>]]]
+graph exportsettings
+
+gtype :: dot
+img_format :: `dot` output format (see the +-T+ option)
+...............
+Example:
+...............
+graph dot
+graph dot clu1.conf.dot
+graph dot clu1.conf.svg svg
+...............
+
+[[cmdhelp_configure_group,define a group]]
+==== `group`
+
+The `group` command creates a group of resources. This can be useful
+when resources depend on other resources and require that those
+resources start in order on the same node. A common use of resource
+groups is to ensure that a server and a virtual IP are located
+together, and that the virtual IP is started before the server.
+
+Grouped resources are started in the order they appear in the group,
+and stopped in the reverse order. If a resource in the group cannot
+run anywhere, resources following it in the group will not start.
+
+`group` can be passed the "container" meta attribute, to indicate that
+it is to be used to group VM resources monitored using Nagios. The
+resource referred to by the container attribute must be of type
+`ocf:heartbeat:Xen`, `ocf:heartbeat:VirtualDomain` or `ocf:heartbeat:lxc`.
+
+Usage:
+...............
+group <name> <rsc> [<rsc>...]
+ [description=<description>]
+ [meta attr_list]
+ [params attr_list]
+
+attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+...............
+Example:
+...............
+group internal_www disk0 fs0 internal_ip apache \
+ meta target_role=stopped
+
+group vm-and-services vm vm-sshd meta container="vm"
+...............
+
+[[cmdhelp_configure_load,import the CIB from a file]]
+==== `load`
+
+Load a part of the configuration (or all of it) from a local file
+or a network URL. The +replace+ method replaces the current
+configuration with the one from the source. The +update+ method
+tries to import the contents into the current configuration. The
++push+ method imports the contents into the current configuration
+and removes any lines that are not present in the given
+configuration.
+The file may be a CLI file or an XML file.
+
+If the URL is `-`, the configuration is read from standard input.
+
+Usage:
+...............
+load [xml] <method> URL
+
+method :: replace | update | push
+...............
+Example:
+...............
+load xml update myfirstcib.xml
+load xml replace http://storage.big.com/cibs/bigcib.xml
+load xml push smallcib.xml
+...............
+
+[[cmdhelp_configure_location,a location preference]]
+==== `location`
+
+`location` defines the preference of nodes for the given
+resource. The location constraints consist of one or more rules
+which specify a score to be awarded if the rule matches.
+
+The resource referenced by the location constraint can be one of the
+following:
+
+* Plain resource reference: +location loc1 webserver 100: node1+
+* Resource set in curly brackets: +location loc1 { virtual-ip webserver } 100: node1+
+* Tag containing resource ids: +location loc1 tag1 100: node1+
+* Resource pattern: +location loc1 /web.*/ 100: node1+
+
+The +resource-discovery+ attribute allows probes to be selectively
+enabled or disabled per resource and node.
+
+The syntax for resource sets is described in detail for
+<<cmdhelp_configure_colocation,`colocation`>>.
+
+For more details on how to configure resource sets, see
+<<topics_Features_Resourcesets,`Syntax: Resource sets`>>.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+location <id> <rsc> [<attributes>] {<node_pref>|<rules>}
+
+rsc :: /<rsc-pattern>/
+ | { resource_sets }
+ | <rsc>
+
+attributes :: role=<role> | resource-discovery=always|never|exclusive
+
+node_pref :: <score>: <node>
+
+rules ::
+ rule [id_spec] [$role=<role>] <score>: <expression>
+ [rule [id_spec] [$role=<role>] <score>: <expression> ...]
+
+id_spec :: $id=<id> | $id-ref=<id>
+score :: <number> | <attribute> | [-]inf
+expression :: <simple_exp> [<bool_op> <simple_exp> ...]
+bool_op :: or | and
+simple_exp :: <attribute> [type:]<binary_op> <value>
+ | <unary_op> <attribute>
+ | date <date_expr>
+type :: string | version | number
+binary_op :: lt | gt | lte | gte | eq | ne
+unary_op :: defined | not_defined
+
+date_expr :: lt <end>
+ | gt <start>
+ | in start=<start> end=<end>
+ | in start=<start> <duration>
+ | spec <date_spec>
+duration|date_spec ::
+ hours=<value>
+ | monthdays=<value>
+ | weekdays=<value>
+ | yeardays=<value>
+ | months=<value>
+ | weeks=<value>
+ | years=<value>
+ | weekyears=<value>
+ | moon=<value>
+...............
+Examples:
+...............
+location conn_1 internal_www 100: node1
+
+location conn_1 internal_www \
+ rule 50: #uname eq node1 \
+ rule pingd: defined pingd
+
+location conn_2 dummy_float \
+ rule -inf: not_defined pingd or pingd number:lte 0
+
+# never probe for rsc1 on node1
+location no-probe rsc1 resource-discovery=never -inf: node1
+...............
+
+[[cmdhelp_configure_modgroup,modify group]]
+==== `modgroup`
+
+Add or remove primitives in a group. The `add` subcommand appends
+the new group member by default. Should it go elsewhere, there
+are `after` and `before` clauses.
+
+Usage:
+...............
+modgroup <id> add <id> [after <id>|before <id>]
+modgroup <id> remove <id>
+...............
+Examples:
+...............
+modgroup share1 add storage2 before share1-fs
+...............
+
+[[cmdhelp_configure_monitor,add monitor operation to a primitive]]
+==== `monitor`
+
+Monitor is by far the most common operation. It is possible to
+add it without editing the whole resource. Also, keeping monitor
+operations separate leaves long primitive definitions a bit less
+cluttered. In order to make this command as concise as possible,
+less common operation attributes are not available. If you need
+them, then use the `op` part of the `primitive` command.
+
+Usage:
+...............
+monitor <rsc>[:<role>] <interval>[:<timeout>]
+...............
+Example:
+...............
+monitor apcfence 60m:60s
+...............
+
+Note that after executing the command, the monitor operation may
+be shown as part of the primitive definition.
+
+[[cmdhelp_configure_ms,define a master-slave resource]]
+==== `ms` (`master`)
+
+The `ms` command creates a master/slave resource type. It may contain a
+single primitive resource or one group of resources.
+
+Usage:
+...............
+ms <name> <rsc>
+ [description=<description>]
+ [meta attr_list]
+ [params attr_list]
+
+attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+...............
+Example:
+...............
+ms disk1 drbd1 \
+ meta notify=true globally-unique=false
+...............
+
+.Note on `id-ref` usage
+****************************
+Instance or meta attributes (`params` and `meta`) may contain
+a reference to another set of attributes. In that case, no other
+attributes are allowed. Since attribute sets' ids, though they do
+exist, are not shown in the `crm`, it is also possible to
+reference an object instead of an attribute set. `crm` will
+automatically replace such a reference with the right id:
+
+...............
+crm(live)configure# primitive a2 apache meta $id-ref=a1
+crm(live)configure# show a2
+primitive a2 apache \
+ meta $id-ref=a1-meta_attributes
+ [...]
+...............
+It is advisable to give meaningful names to attribute sets which
+are going to be referenced.
+****************************
+
+[[cmdhelp_configure_node,define a cluster node]]
+==== `node`
+
+The node command describes a cluster node. Nodes in the CIB are
+commonly created automatically by the CRM. Hence, you should not
+need to deal with nodes unless you also want to define node
+attributes. Note that it is also possible to manage node
+attributes at the `node` level.
+
+Usage:
+...............
+node [$id=<id>] <uname>[:<type>]
+ [description=<description>]
+ [attributes [$id=<id>] [<score>:] [rule...]
+ <param>=<value> [<param>=<value>...]] | $id-ref=<ref>
+ [utilization [$id=<id>] [<score>:] [rule...]
+ <param>=<value> [<param>=<value>...]] | $id-ref=<ref>
+
+type :: normal | member | ping | remote
+...............
+Example:
+...............
+node node1
+node big_node attributes memory=64
+...............
+
+[[cmdhelp_configure_op_defaults,set resource operations defaults]]
+==== `op_defaults`
+
+Set defaults for the operations meta attributes.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+op_defaults [$id=<set_id>] [rule ...] <option>=<value> [<option>=<value> ...]
+...............
+Example:
+...............
+op_defaults record-pending=true
+...............
+
+[[cmdhelp_configure_order,order resources]]
+==== `order`
+
+This constraint expresses the order of actions on two or more
+resources. If there are more than two resources, then the
+constraint is called a resource set.
+
+Ordered resource sets have an extra attribute to allow for sets
+of resources whose actions may run in parallel. The shell syntax
+for such sets is to put resources in parentheses.
+
+If the subsequent resource can start or promote after any one of the
+resources in a set has completed its action, enclose the set in
+brackets (+[+ and +]+).
+
+Sets cannot be nested.
+
+Three strings are reserved to specify a kind of order constraint:
++Mandatory+, +Optional+, and +Serialize+. It is preferred to use
+one of these settings instead of score. Previous versions mapped
+scores +0+ and +inf+ to keywords +advisory+ and +mandatory+.
+That is still valid but deprecated.
+
+For more details on how to configure resource sets, see
+<<topics_Features_Resourcesets,`Syntax: Resource sets`>>.
+
+Usage:
+...............
+order <id> [{kind|<score>}:] first then [symmetrical=<bool>]
+
+order <id> [{kind|<score>}:] resource_sets [symmetrical=<bool>]
+
+kind :: Mandatory | Optional | Serialize
+
+first :: <rsc>[:<action>]
+
+then :: <rsc>[:<action>]
+
+resource_sets :: resource_set [resource_set ...]
+
+resource_set :: ["["|"("] <rsc>[:<action>] [<rsc>[:<action>] ...] \
+ [attributes] ["]"|")"]
+
+attributes :: [require-all=(true|false)] [sequential=(true|false)]
+
+...............
+Example:
+...............
+order o-1 Mandatory: apache:start ip_1
+order o-2 Serialize: A ( B C )
+order o-3 inf: [ A B ] C
+order o-4 first-resource then-resource
+...............
+
+[[cmdhelp_configure_primitive,define a resource]]
+==== `primitive`
+
+The primitive command describes a resource. It may be referenced
+only once in group, clone, or master-slave objects. If it's not
+referenced, then it is placed as a single resource in the CIB.
+
+Operations may be specified anonymously, as a group or by reference:
+
+* "Anonymous", as a list of +op+ specifications. Use this
+ method if you don't need to reference the set of operations
+ elsewhere. This is the most common way to define operations.
+
+* If reusing operation sets is desired, use the +operations+ keyword
+ along with an id to give the operations set a name. Use the
+ +operations+ keyword and an id-ref value set to the id of another
+ operations set, to apply the same set of operations to this
+ primitive.
+
+Operation attributes which are not recognized are saved as
+instance attributes of that operation. A typical example is
++OCF_CHECK_LEVEL+.
+
+For multistate resources, roles are specified as +role=<role>+.
+
+A template may be defined for resources which are of the same
+type and which share most of the configuration. See
+<<cmdhelp_configure_rsc_template,`rsc_template`>> for more information.
+
+Attributes containing time values, such as the +interval+ attribute on
+operations, are configured either as a plain number, which is
+interpreted as a time in seconds, or using one of the following
+suffixes:
+
+* +s+, +sec+ - time in seconds (same as no suffix)
+* +ms+, +msec+ - time in milliseconds
+* +us+, +usec+ - time in microseconds
+* +m+, +min+ - time in minutes
+* +h+, +hr+ - time in hours
+
+Usage:
+...............
+primitive <rsc> {[<class>:[<provider>:]]<type>|@<template>}
+ [description=<description>]
+ [[params] attr_list]
+ [meta attr_list]
+ [utilization attr_list]
+ [operations id_spec]
+ [op op_type [<attribute>=<value>...] ...]
+
+attr_list :: [$id=<id>] [<score>:] [rule...]
+ <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+id_spec :: $id=<id> | $id-ref=<id>
+op_type :: start | stop | monitor
+...............
+Example:
+...............
+primitive apcfence stonith:apcsmart \
+ params ttydev=/dev/ttyS0 hostlist="node1 node2" \
+ op start timeout=60s \
+ op monitor interval=30m timeout=60s
+
+primitive www8 apache \
+ configfile=/etc/apache/www8.conf \
+ operations $id-ref=apache_ops
+
+primitive db0 mysql \
+ params config=/etc/mysql/db0.conf \
+ op monitor interval=60s \
+ op monitor interval=300s OCF_CHECK_LEVEL=10
+
+primitive r0 ocf:linbit:drbd \
+ params drbd_resource=r0 \
+ op monitor role=Master interval=60s \
+ op monitor role=Slave interval=300s
+
+primitive xen0 @vm_scheme1 xmfile=/etc/xen/vm/xen0
+
+primitive mySpecialRsc Special \
+ params 3: rule #uname eq node1 interface=eth1 \
+ params 2: rule #uname eq node2 interface=eth2 port=8888 \
+ params 1: interface=eth0 port=9999
+
+...............
+
+[[cmdhelp_configure_property,set a cluster property]]
+==== `property`
+
+Set cluster configuration properties. To list the
+available cluster configuration properties, use the
+<<cmdhelp_ra_info,`ra info`>> command with +pengine+, +crmd+,
++cib+ and +stonithd+ as arguments.
+When setting the +maintenance-mode+ property, the command will
+inform the user if there are nodes or resources that have the
++maintenance+ property set.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+property [<set_id>:] [rule ...] <option>=<value> [<option>=<value> ...]
+...............
+Example:
+...............
+property stonith-enabled=true
+property rule date spec years=2014 stonith-enabled=false
+...............
+
+[[cmdhelp_configure_ptest,show cluster actions if changes were committed]]
+==== `ptest` (`simulate`)
+
+Show PE (Policy Engine) motions using `ptest(8)` or
+`crm_simulate(8)`.
+
+A CIB is constructed using the current user-edited configuration
+and the status from the running CIB. The resulting CIB is run
+through `ptest` (or `crm_simulate`) to show changes which would
+happen if the configuration is committed.
+
+The status section may be loaded from another source and modified
+using the <<cmdhelp_cibstatus,`cibstatus`>> level commands. In that case, the
+`ptest` command will issue a message informing the user that the
+Policy Engine graph is not calculated based on the current status
+section, and therefore won't show what would happen to the running
+cluster, but to an imaginary one.
+
+If you have graphviz installed and an X11 session, `dotty(1)` is run
+to display the changes graphically.
+
+Add a string of +v+ characters to increase verbosity. `ptest`
+can also show allocation scores. +utilization+ turns on
+information about the remaining capacity of nodes. With the
++actions+ option, `ptest` will print all resource actions.
+
+The `ptest` program has been replaced by `crm_simulate` in newer
+Pacemaker versions. In some installations, both may be
+installed. Use `simulate` to enforce using `crm_simulate`.
+
+Usage:
+...............
+ptest [nograph] [v...] [scores] [actions] [utilization]
+...............
+Examples:
+...............
+ptest scores
+ptest vvvvv
+simulate actions
+...............
+
+[[cmdhelp_configure_refresh,refresh from CIB]]
+==== `refresh`
+
+Refresh the internal structures from the CIB. All changes made
+during this session are lost.
+
+Usage:
+...............
+refresh
+...............
+
+[[cmdhelp_configure_rename,rename a CIB object]]
+==== `rename`
+
+Rename an object. It is recommended to use this command to rename
+a resource, because it will take care of updating all related
+constraints and the parent resource. Changing ids with the `edit`
+command won't have the same effect.
+
+If you want to rename a resource, it must be in the stopped state.
+
+Usage:
+...............
+rename <old_id> <new_id>
+...............
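+Example (with illustrative ids; the resource must be stopped first):
+...............
+rename ip_1 ip_internal
+...............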
+
+[[cmdhelp_configure_role,define role access rights]]
+==== `role`
+
+An ACL role is a set of rules which describe access rights to the
+CIB. Rules consist of an access right +read+, +write+, or +deny+
+and a specification denoting part of the configuration to which
+the access right applies. The specification can be an XPath or a
+combination of tag and id references. If an attribute is
+appended, then the specification applies only to that attribute
+of the matching element.
+
+There are a number of shortcuts for XPath specifications. The
++meta+, +params+, and +utilization+ shortcuts reference resource
+meta attributes, parameters, and utilization respectively. The
++location+ shortcut references location constraints, most of
+the time to allow the resource `move` and `unmove` commands.
++property+ references cluster properties, and +node+ allows
+reading node attributes. +nodeattr+ and +nodeutil+ reference node
+attributes and node capacity (utilization). The +status+ shortcut
+references the whole status section of the CIB. Read access to
+status is necessary for various monitoring tools such as
+`crm_mon(8)` (aka `crm status`).
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+role <role-id> rule [rule ...]
+
+rule :: acl-right cib-spec [attribute:<attribute>]
+
+acl-right :: read | write | deny
+
+cib-spec :: xpath-spec | tag-ref-spec
+xpath-spec :: xpath:<xpath> | shortcut
+tag-ref-spec :: tag:<tag> | ref:<id> | tag:<tag> ref:<id>
+
+shortcut :: meta:<rsc>[:<attr>]
+ params:<rsc>[:<attr>]
+ utilization:<rsc>
+ location:<rsc>
+ property[:<attr>]
+ node[:<node>]
+ nodeattr[:<attr>]
+ nodeutil[:<node>]
+ status
+...............
+Example:
+...............
+role app1_admin \
+ write meta:app1:target-role \
+ write meta:app1:is-managed \
+ write location:app1 \
+ read ref:app1
+...............
+
+[[cmdhelp_configure_rsc_defaults,set resource defaults]]
+==== `rsc_defaults`
+
+Set defaults for the resource meta attributes.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+rsc_defaults [<set_id>:] [rule ...] <option>=<value> [<option>=<value> ...]
+...............
+Example:
+...............
+rsc_defaults failure-timeout=3m
+...............
+
+[[cmdhelp_configure_rsc_template,define a resource template]]
+==== `rsc_template`
+
+The `rsc_template` command creates a resource template. It may be
+referenced in primitives. It is used to reduce large
+configurations with many similar resources.
+
+Usage:
+...............
+rsc_template <name> [<class>:[<provider>:]]<type>
+ [description=<description>]
+ [params attr_list]
+ [meta attr_list]
+ [utilization attr_list]
+ [operations id_spec]
+ [op op_type [<attribute>=<value>...] ...]
+
+attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+id_spec :: $id=<id> | $id-ref=<id>
+op_type :: start | stop | monitor
+...............
+Example:
+...............
+rsc_template public_vm Xen \
+ op start timeout=300s \
+ op stop timeout=300s \
+ op monitor interval=30s timeout=60s \
+ op migrate_from timeout=600s \
+ op migrate_to timeout=600s
+primitive xen0 @public_vm \
+ params xmfile=/etc/xen/xen0
+primitive xen1 @public_vm \
+ params xmfile=/etc/xen/xen1
+...............
+
+[[cmdhelp_configure_rsc_ticket,resources ticket dependency]]
+==== `rsc_ticket`
+
+This constraint expresses dependency of resources on cluster-wide
+attributes, also known as tickets. Tickets are mainly used in
+geo-clusters, which consist of multiple sites. A ticket may be
+granted to a site, thus allowing resources to run there.
+
+The +loss-policy+ attribute specifies what happens to the
+resource (or resources) if the ticket is revoked. The default is
+either +stop+ or +demote+ depending on whether a resource is
+multi-state.
+
+See also the <<cmdhelp_site_ticket,`site`>> set of commands.
+
+Usage:
+...............
+rsc_ticket <id> <ticket_id>: <rsc>[:<role>] [<rsc>[:<role>] ...]
+ [loss-policy=<loss_policy_action>]
+
+loss_policy_action :: stop | demote | fence | freeze
+...............
+Example:
+...............
+rsc_ticket ticket-A_public-ip ticket-A: public-ip
+rsc_ticket ticket-A_bigdb ticket-A: bigdb loss-policy=fence
+rsc_ticket ticket-B_storage ticket-B: drbd-a:Master drbd-b:Master
+...............
+
+
+[[cmdhelp_configure_rsctest,test resources as currently configured]]
+==== `rsctest`
+
+Test resources with current resource configuration. If no nodes
+are specified, tests are run on all known nodes.
+
+The order of resources is significant: it is assumed that later
+resources depend on earlier ones.
+
+If a resource is multi-state, it is assumed that the role on
+which later resources depend is master.
+
+Tests are run sequentially to prevent running the same resource
+on two or more nodes. Tests are carried out only if none of the
+specified nodes currently run any of the specified resources.
+However, this does not verify whether resources run on the other
+nodes.
+
+Superuser privileges are obviously required: either run this as
+root or set up the `sudoers` file appropriately.
+
+Note that resource testing may take some time.
+
+Usage:
+...............
+rsctest <rsc_id> [<rsc_id> ...] [<node_id> ...]
+...............
+Examples:
+...............
+rsctest my_ip websvc
+rsctest websvc nodeB
+...............
+
+[[cmdhelp_configure_save,save the CIB to a file]]
+==== `save`
+
+Save the current configuration to a file, optionally as XML. Use
++-+ instead of a file name to write the output to `stdout`.
+
+The `save` command accepts the same selection arguments as the `show`
+command. See the <<cmdhelp_configure_show,help section>> for `show`
+for more details.
+
+Usage:
+...............
+save [xml] [<id> | type:<type> | tag:<tag> |
+ related:<obj> | changed ...] <file>
+...............
+Example:
+...............
+save myfirstcib.txt
+save web-server server-config.txt
+...............
+
+[[cmdhelp_configure_schema,set or display current CIB RNG schema]]
+==== `schema`
+
+The CIB's content is validated against an RNG schema. Pacemaker
+supports several schemas, depending on the version. At least the
+following schemas are accepted by `crmsh`:
+
+* +pacemaker-1.0+
+* +pacemaker-1.1+
+* +pacemaker-1.2+
+* +pacemaker-1.3+
+* +pacemaker-2.0+
+
+Use this command to display or switch to another RNG schema.
+
+Usage:
+...............
+schema [<schema>]
+...............
+Example:
+...............
+schema pacemaker-1.1
+...............
+
+[[cmdhelp_configure_set,set an attribute value]]
+==== `set`
+
+Set the value of a configured attribute. The attribute must
+have a value configured previously, and can be an agent
+parameter, meta attribute or utilization value.
+
+The first argument to the command is a path to an attribute.
+This is a dot-separated sequence beginning with the name of
+the resource, and ending with the name of the attribute to
+set.
+
+Usage:
+...............
+set <path> <value>
+...............
+Examples:
+...............
+set vip1.ip 192.168.20.5
+set vm-a.force_stop 1
+...............
+
+[[cmdhelp_configure_show,display CIB objects]]
+==== `show`
+
+The `show` command displays CIB objects. Without any argument, it
+displays all objects in the CIB, but the set of objects displayed by
+`show` can be limited to only objects with the given IDs or by using
+one or more of the special prefixes described below.
+
+The XML representation for the objects can be displayed by passing
++xml+ as the first argument.
+
+To show one or more specific objects, pass the object IDs as
+arguments.
+
+To show all objects of a certain type, use the +type:+ prefix.
+
+To show all objects in a tag, use the +tag:+ prefix.
+
+To show all constraints related to a primitive, use the +related:+ prefix.
+
+To show all modified objects, pass the argument +changed+.
+
+The prefixes can be used together on a single command line. For
+example, to show both the tag itself and the objects tagged by it,
+the following combination can be used: +show tag:my-tag my-tag+.
+
+To refine a selection of objects using multiple modifiers, the keywords
++and+ and +or+ can be used. For example, to select all primitives tagged
++foo+, the following combination can be used:
++show type:primitive and tag:foo+.
+
+To hide values when displaying the configuration, use the
++obscure:<glob>+ argument. This can be useful when sending the
+configuration over a public channel, to avoid exposing potentially
+sensitive information. The +<glob>+ argument is a bash-style pattern
+matching attribute keys.
+
+Usage:
+...............
+show [xml] [<id>
+ | changed
+ | type:<type>
+ | tag:<id>
+ | related:<obj>
+ | obscure:<glob>
+ ...]
+
+type :: node | primitive | group | clone | ms | rsc_template
+ | location | colocation | order
+ | rsc_ticket
+ | property | rsc_defaults | op_defaults
+ | fencing_topology
+ | role | user | acl_target
+ | tag
+...............
+
+Example:
+...............
+show webapp
+show type:primitive
+show xml tag:db tag:fs
+show related:webapp
+show type:primitive obscure:passwd
+...............
+
+[[cmdhelp_configure_tag,Define resource tags]]
+==== `tag`
+
+Define a resource tag. A tag is an id referring to one or more
+resources, without implying any constraints between the tagged
+resources. This can be useful for grouping conceptually related
+resources.
+
+Usage:
+...............
+tag <tag-name>: <rsc> [<rsc> ...]
+tag <tag-name> <rsc> [<rsc> ...]
+...............
+Example:
+...............
+tag web: p-webserver p-vip
+tag ips server-vip admin-vip
+...............
+
+[[cmdhelp_configure_template,edit and import a configuration from a template]]
+==== `template`
+
+The specified template is loaded into the editor. It's up to the
+user to make a good CRM configuration out of it. See also the
+<<cmdhelp_template,template section>>.
+
+Usage:
+...............
+template [xml] url
+...............
+Example:
+...............
+template two-apaches.txt
+...............
+
+[[cmdhelp_configure_upgrade,upgrade the CIB]]
+==== `upgrade`
+
+Attempts to upgrade the CIB so that it validates with the current
+schema version. Commonly, this is required if the error
+`CIB not supported` occurs. It typically means that the
+active CIB version is coming from an older release.
+
+As a safety precaution, the +force+ argument is required if the
++validate-with+ attribute is set to anything other than
++0.6+. Thus in most cases, it is required.
+
+Usage:
+...............
+upgrade [force]
+...............
+
+Example:
+...............
+upgrade force
+...............
+
+[[cmdhelp_configure_user,define user access rights]]
+==== `user`
+
+Users which normally cannot view or manage cluster configuration
+can be allowed access to parts of the CIB. The access is defined
+by a set of +read+, +write+, and +deny+ rules as in role
+definitions or by referencing roles. The latter is considered
+best practice.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+user <uid> {roles|rules}
+
+roles :: role:<role-ref> [role:<role-ref> ...]
+rules :: rule [rule ...]
+...............
+Example:
+...............
+user joe \
+ role:app1_admin \
+ role:read_all
+...............
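+
+A user may also be given rules directly, written as in role
+definitions; the ids below are illustrative:
+...............
+user alice \
+    write meta:app1:target-role \
+    read ref:app1
+...............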
+
+[[cmdhelp_configure_validate_all,call agent validate-all for resource]]
+==== `validate-all`
+
+Call the `validate-all` action for the resource, if possible.
+
+Limitations:
+
+* The resource agent must implement the `validate-all` action.
+* The current user must be root.
+* The primitive resource must not use nvpair references.
+
+Usage:
+...............
+validate-all <rsc>
+...............
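+Example (using the illustrative +apcfence+ primitive from the
+`primitive` examples):
+...............
+validate-all apcfence
+...............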
+
+
+[[cmdhelp_configure_verify,verify the CIB with crm_verify]]
+==== `verify`
+
+Verify the contents of the CIB which would be committed.
+
+Usage:
+...............
+verify
+...............
+
+[[cmdhelp_configure_xml,raw xml]]
+==== `xml`
+
+Even though we promised no XML, it may happen, though hopefully
+very seldom, that an element from the CIB cannot be rendered
+in the configuration language. In that case, the element will be
+shown as raw XML, prefixed by this command. That element can then
+be edited like any other. If the shell finds that it can digest
+the element after the change, it will be converted back into the
+normal configuration language. Otherwise, there is no need to
+use `xml` for configuration.
+
+Usage:
+...............
+xml <xml>
+...............
+
+[[cmdhelp_template,edit and import a configuration from a template]]
+=== `template` - Import configuration from templates
+
+Users may be assisted in the cluster configuration by templates
+prepared in advance. Templates consist of a typical ready-made
+configuration which may be edited to suit particular user needs.
+
+This command enters a template level where additional commands
+for configuration/template management are available.
+
+[[cmdhelp_template_apply,process and apply the current configuration to the current CIB]]
+==== `apply`
+
+Copy the current or given configuration to the current CIB. By
+default, the CIB is replaced, unless the method is set to
+"update".
+
+Usage:
+...............
+apply [<method>] [<config>]
+
+method :: replace | update
+...............
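+Example (assuming a configuration named +vip+, as created in the
+`new` command example below):
+...............
+apply update vip
+...............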
+
+[[cmdhelp_template_delete,delete a configuration]]
+==== `delete`
+
+Remove a configuration. The loaded (active) configuration may be
+removed by force.
+
+Usage:
+...............
+delete <config> [force]
+...............
+
+[[cmdhelp_template_edit,edit a configuration]]
+==== `edit`
+
+Edit current or given configuration using your favourite editor.
+
+Usage:
+...............
+edit [<config>]
+...............
+
+[[cmdhelp_template_list,list configurations/templates]]
+==== `list`
+
+When called with no argument, lists existing templates and
+configurations.
+
+Given the argument +templates+, lists the available templates.
+
+Given the argument +configs+, lists the available configurations.
+
+Usage:
+...............
+list [templates|configs]
+...............
+
+[[cmdhelp_template_load,load a configuration]]
+==== `load`
+
+Load an existing configuration. Further `edit`, `show`, and
+`apply` commands will refer to this configuration.
+
+Usage:
+...............
+load <config>
+...............
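+Example (assuming a configuration named +vip+ exists):
+...............
+load vip
+...............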
+
+[[cmdhelp_template_new,create a new configuration from templates]]
+==== `new`
+
+Create a new configuration from one or more templates. Note that
+configurations and templates are kept in different places, so it
+is possible for a configuration name to equal a template name.
+
+If you already know which parameters are required, you can set
+them directly on the command line.
+
+The parameter name +id+ is set by default to the name of the
+configuration.
+
+If no parameters are being set and you don't want a particular name
+for your configuration, you can call this command with a template name
+as the only parameter. A unique configuration name based on the
+template name will be generated.
+
+Usage:
+...............
+new [<config>] <template> [<template> ...] [params name=value ...]
+...............
+
+Example:
+...............
+new vip virtual-ip
+new bigfs ocfs2 params device=/dev/sdx8 directory=/bigfs
+new apache
+...............
+
+[[cmdhelp_template_show,show the processed configuration]]
+==== `show`
+
+Process the current or given configuration and display the result.
+
+Usage:
+...............
+show [<config>]
+...............
+
+[[cmdhelp_cibstatus,CIB status management and editing]]
+=== `cibstatus` - CIB status management and editing
+
+The `status` section of the CIB keeps the current status of nodes
+and resources. It is modified _only_ on events, i.e. when some
+resource operation is run or node status changes. For obvious
+reasons, the CRM has no user interface with which it is possible
+to affect the status section. From the user's point of view, the
+status section is essentially a read-only part of the CIB. The
+current status is never even written to disk, though it is
+available in the PE (Policy Engine) input files which represent
+the history of cluster motions. The current status may be read
+using the +cibadmin -Q+ command.
+
+It may sometimes be of interest to see how status changes would
+affect the Policy Engine. The set of `cibstatus` level commands
+allow the user to load status sections from various sources and
+then insert or modify resource operations or change nodes' state.
+
+The effect of those changes may then be observed by running the
+<<cmdhelp_configure_ptest,`ptest`>> command at the `configure` level
+or the `simulate` and `run` commands at this level. `ptest`
+runs with the user-edited CIB, whereas the latter two commands
+run with the CIB which was loaded along with the status section.
+
+The `simulate` and `run` commands as well as all status
+modification commands are implemented using `crm_simulate(8)`.
+
+[[cmdhelp_cibstatus_load,load the CIB status section]]
+==== `load`
+
+Load a status section from a file, a shadow CIB, or the running
+cluster. By default, the current (+live+) status section is
+modified. Note that if the +live+ status section is modified it
+is not going to be updated if the cluster status changes, because
+that would overwrite the user changes. To make `crm` drop changes
+and resume use of the running cluster status, run +load live+.
+
+All CIB shadow configurations contain the status section which is
+a snapshot of the status section taken at the time the shadow was
+created. Obviously, this status section doesn't have much to do
+with the running cluster status, unless the shadow CIB has just
+been created. Therefore, the `ptest` command by default uses the
+running cluster status section.
+
+Usage:
+...............
+load {<file>|shadow:<cib>|live}
+...............
+Example:
+...............
+load bug-12299.xml
+load shadow:test1
+...............
+
+[[cmdhelp_cibstatus_node,change node status]]
+==== `node`
+
+Change the node status. It is possible to throw a node out of
+the cluster, make it a member, or set its state to unclean.
+
++online+:: Set the +node_state+ `crmd` attribute to +online+
+and the +expected+ and +join+ attributes to +member+. The effect
+is that the node becomes a cluster member.
+
++offline+:: Set the +node_state+ `crmd` attribute to +offline+
+and the +expected+ attribute to empty. The node is then considered
+cleanly removed from the cluster.
+
++unclean+:: Set the +node_state+ `crmd` attribute to +offline+
+and the +expected+ attribute to +member+. In this case the node
+has unexpectedly disappeared.
+
+Usage:
+...............
+node <node> {online|offline|unclean}
+...............
+Example:
+...............
+node xen-b unclean
+...............
+
+[[cmdhelp_cibstatus_op,edit outcome of a resource operation]]
+==== `op`
+
+Edit the outcome of a resource operation. This way you can
+tell the CRM that it ran an operation and that the resource agent
+returned a certain exit code. It is also possible to change the
+operation's status. If the operation status is set to
+something other than +done+, the exit code is effectively
+ignored.
+
+Usage:
+...............
+op <operation> <resource> <exit_code> [<op_status>] [<node>]
+
+operation :: probe | monitor[:<n>] | start | stop |
+ promote | demote | notify | migrate_to | migrate_from
+exit_code :: <rc> | success | generic | args |
+ unimplemented | perm | installed | configured | not_running |
+ master | failed_master
+op_status :: pending | done | cancelled | timeout | notsupported | error
+
+n :: the monitor interval in seconds; if omitted, the first
+ recurring operation is referenced
+rc :: numeric exit code in range 0..9
+...............
+Example:
+...............
+op start d1 xen-b generic
+op start d1 xen-b 1
+op monitor d1 xen-b not_running
+op stop d1 xen-b 0 timeout
+...............
+
+[[cmdhelp_cibstatus_origin,display origin of the CIB status section]]
+==== `origin`
+
+Show the origin of the status section currently in use. This
+essentially shows the latest `load` argument.
+
+Usage:
+...............
+origin
+...............
+
+[[cmdhelp_cibstatus_quorum,set the quorum]]
+==== `quorum`
+
+Set the quorum value.
+
+Usage:
+...............
+quorum <bool>
+...............
+Example:
+...............
+quorum false
+...............
+
+[[cmdhelp_cibstatus_run,run policy engine]]
+==== `run`
+
+Run the policy engine with the edited status section.
+
+Add a string of +v+ characters to increase verbosity. Specify
++scores+ to also see allocation scores. +utilization+ turns on
+information about the remaining capacity of nodes.
+
+If you have graphviz installed and an X11 session, `dotty(1)` is run
+to display the changes graphically.
+
+Usage:
+...............
+run [nograph] [v...] [scores] [utilization]
+...............
+Example:
+...............
+run
+...............
+
+[[cmdhelp_cibstatus_save,save the CIB status section]]
+==== `save`
+
+The current internal status section with whatever modifications
+were performed can be saved to a file or shadow CIB.
+
+If the file exists and contains a complete CIB, only the status
+section is going to be replaced and the rest of the CIB will
+remain intact. Otherwise, the current user-edited configuration
+is saved along with the status section.
+
+Note that all modifications are saved in the source file as soon
+as they are run.
+
+Usage:
+...............
+save [<file>|shadow:<cib>]
+...............
+Example:
+...............
+save bug-12299.xml
+...............
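+
+The status section can likewise be saved to a shadow CIB; +test1+
+below is the illustrative shadow name from the `load` example:
+...............
+save shadow:test1
+...............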
+
+[[cmdhelp_cibstatus_show,show CIB status section]]
+==== `show`
+
+Show the current status section in the XML format. Brace yourself
+for some unreadable output. Add the +changed+ option to get a
+human-readable output of all changes.
+
+Usage:
+...............
+show [changed]
+...............
+
+[[cmdhelp_cibstatus_simulate,simulate cluster transition]]
+==== `simulate`
+
+Run the policy engine with the edited status section and simulate
+the transition.
+
+Add a string of +v+ characters to increase verbosity. Specify
++scores+ to also see allocation scores. +utilization+ turns on
+information about the remaining capacity of nodes.
+
+If you have graphviz installed and an X11 session, `dotty(1)` is run
+to display the changes graphically.
+
+Usage:
+...............
+simulate [nograph] [v...] [scores] [utilization]
+...............
+Example:
+...............
+simulate
+...............
+
+[[cmdhelp_cibstatus_ticket,manage tickets]]
+==== `ticket`
+
+Modify the ticket status. Tickets can be granted and revoked.
+Granted tickets can be activated or put in standby.
+
+Usage:
+...............
+ticket <ticket> {grant|revoke|activate|standby}
+...............
+Example:
+...............
+ticket ticketA grant
+...............
+
+[[cmdhelp_assist,Configuration assistant]]
+=== `assist` - Configuration assistant
+
+The `assist` sublevel is a collection of helper
+commands that create or modify resources and
+constraints, to simplify the creation of certain
+configurations.
+
+For more information on individual commands, see
+the help text for those commands.
+
+[[cmdhelp_assist_template,Create template for primitives]]
+==== `template`
+
+This command takes a list of primitives as argument, and creates a new
+`rsc_template` for these primitives. It can only do this if the
+primitives do not already share a template and are of the same type.
+
+Usage:
+........
+template primitive-1 primitive-2 primitive-3
+........
+
+[[cmdhelp_assist_weak-bond,Create a weak bond between resources]]
+==== `weak-bond`
+
+A colocation between a group of resources says that the resources
+should be located together, but it also means that those resources are
+dependent on each other. If one of the resources fails, the others
+will be restarted.
+
+If this is not desired, it is possible to circumvent this: by placing
+the resources in a non-sequential set and colocating the set with a
+dummy resource which is not monitored, the resources will be placed
+together but will have no further dependency on each other.
+
+This command creates both the constraint and the dummy resource needed
+for such a colocation.
+
+Usage:
+........
+weak-bond resource-1 resource-2
+........
+
+[[cmdhelp_maintenance,Maintenance mode commands]]
+=== `maintenance` - Maintenance mode commands
+
+Maintenance mode commands are commands that manipulate resources
+directly without going through the cluster infrastructure. Therefore,
+it is essential to ensure that the cluster does not attempt to monitor
+or manipulate the resources while these commands are being executed.
+
+To ensure this, these commands require that maintenance mode is set
+either for the particular resource, or for the whole cluster.
+
+[[cmdhelp_maintenance_action,Invoke a resource action]]
+==== `action`
+
+Invokes the given action for the resource. This is
+done directly via the resource agent, so the command must
+be issued while the cluster or the resource is in
+maintenance mode.
+
+Unless the action is `start` or `monitor`, the action must be invoked
+on the same node as where the resource is running. If the resource is
+running on multiple nodes, the command will fail.
+
+To use SSH for executing resource actions on multiple nodes, append
+`ssh` after the action name. This requires SSH access to be configured
+between the nodes and the parallax python package to be installed.
+
+Usage:
+...............
+action <rsc> <action>
+action <rsc> <action> ssh
+...............
+Example:
+...............
+action webserver reload
+action webserver monitor ssh
+...............
+
+[[cmdhelp_maintenance_off,Disable maintenance mode]]
+==== `off`
+
+Disables maintenance mode, either for the whole cluster
+or for the given resource.
+
+Usage:
+...............
+off
+off <rsc>
+...............
+Example:
+...............
+off rsc1
+...............
+
+[[cmdhelp_maintenance_on,Enable maintenance mode]]
+==== `on`
+
+Enables maintenance mode, either for the whole cluster
+or for the given resource.
+
+Usage:
+...............
+on
+on <rsc>
+...............
+Example:
+...............
+on rsc1
+...............
+
+[[cmdhelp_history,Cluster history]]
+=== `history` - Cluster history
+
+Examining Pacemaker's history is a particularly involved task. The
+number of subsystems to be considered, the complexity of the
+configuration, and the set of various information sources, most of
+which are not exactly human readable, keep analyzing resource or node
+problems accessible to only the most knowledgeable. Or, depending on
+the point of view, to the most persistent. The following set of
+commands has been devised in the hope of making cluster history
+more accessible.
+
+Of course, looking at _all_ history could be time consuming regardless
+of how good the tools at hand are. Therefore, one should first
+specify the period to be analyzed. If not otherwise specified,
+the last hour is considered. Logs and other relevant information are
+collected using `crm report`. Since this process takes some time and
+we always need fresh logs, information is refreshed in a much faster
+way using the python parallax module. If +python-parallax+ is not
+found on the system, examining a live cluster is still possible --
+though not as comfortable.
+
+Apart from examining a live cluster, events may be retrieved from a
+report generated by `crm report` (see also the +-H+ option). In that
+case we assume that the period covered by the whole report needs to be
+investigated. Of course, it is still possible to further reduce the
+time range.
+
+If you have discovered an issue that you want to show someone else,
+you can use the `session pack` command to save the current session as
+a tarball, similar to those generated by `crm report`.
+
+In order to minimize the size of the tarball, and to make it easier
+for others to find the interesting events, it is recommended to limit
+the time frame which the saved session covers. This can be done using
+the `timeframe` command (example below).
+
+It is also possible to name the saved session using the `session save`
+command.
+
+Example:
+...............
+crm(live)history# limit "Jul 18 12:00" "Jul 18 12:30"
+crm(live)history# session save strange_restart
+crm(live)history# session pack
+Report saved in .../strange_restart.tar.bz2
+crm(live)history#
+...............
+
+[[cmdhelp_history_detail,set the level of detail shown]]
+==== `detail`
+
+How much detail to show from the logs. Valid detail levels are either
+`0` or `1`, where `1` is the highest detail level. The default detail
+level is `0`.
+
+Usage:
+...............
+detail <detail_level>
+
+detail_level :: small integer (defaults to 0)
+...............
+Example:
+...............
+detail 1
+...............
+
+[[cmdhelp_history_diff,cluster states/transitions difference]]
+==== `diff`
+
+A transition represents a change in cluster configuration or
+state. Use `diff` to see what has changed between two
+transitions.
+
+If you want to specify the current cluster configuration and
+status, use the string +live+.
+
+Normally, the first transition specified should be the one which
+is older, but we are not going to enforce that.
+
+Note that a single configuration update may result in more than
+one transition.
+
+Usage:
+...............
+diff <pe> <pe> [status] [html]
+
+pe :: <number>|<index>|<file>|live
+...............
+Examples:
+...............
+diff 2066 2067
+diff pe-input-2080.bz2 live status
+...............
+
+[[cmdhelp_history_events,Show events in log]]
+==== `events`
+
+By analysing the log output and looking for particular
+patterns, the `events` command helps sift through
+the logs to find when particular events, such as resources
+changing state or node failures, may have occurred.
+
+This can be used to generate a combined list of events
+from all nodes.
+
+Usage:
+...............
+events
+...............
+
+Example:
+...............
+events
+...............
+
+[[cmdhelp_history_exclude,exclude log messages]]
+==== `exclude`
+
+If a log is infested with irrelevant messages, those messages may
+be excluded by specifying a regular expression. The regular
+expressions used are Python extended regular expressions. This
+command is additive. To drop all regular expressions, use
++exclude clear+. Run `exclude` without arguments to see the
+current list of regular expressions. Excludes are saved along
+with the history sessions.
+
+Usage:
+...............
+exclude [<regex>|clear]
+...............
+Example:
+...............
+exclude kernel.*ocfs2
+...............
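+
+To drop the accumulated expressions again:
+...............
+exclude clear
+...............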
+
+[[cmdhelp_history_graph,generate a directed graph from the PE file]]
+==== `graph`
+
+Create a graphviz graphical layout from the PE file (the
+transition). Every transition contains the cluster configuration
+which was active at the time. See also <<cmdhelp_configure_graph,generate a directed graph
+from configuration>>.
+
+Usage:
+...............
+graph <pe> [<gtype> [<file> [<img_format>]]]
+
+gtype :: dot
+img_format :: `dot` output format (see the +-T+ option)
+...............
+Example:
+...............
+graph -1
+graph 322 dot clu1.conf.dot
+graph 322 dot clu1.conf.svg svg
+...............
+
+[[cmdhelp_history_info,Cluster information summary]]
+==== `info`
+
+The `info` command provides a summary of the information source, which
+can be either a live cluster snapshot or a previously generated
+report.
+
+Usage:
+...............
+info
+...............
+Example:
+...............
+info
+...............
+
+[[cmdhelp_history_latest,show latest news from the cluster]]
+==== `latest`
+
+The `latest` command shows a bit of recent history, more
+precisely whatever happened since the last cluster change (the
+latest transition). If the transition is running, the shell will
+first wait until it finishes.
+
+Usage:
+...............
+latest
+...............
+Example:
+...............
+latest
+...............
+
+[[cmdhelp_history_limit,limit timeframe to be examined]]
+==== `limit` (`timeframe`)
+
+This command can be used to modify the time span to examine. All
+history commands look at events within a certain time span.
+
+For the `live` source, the default time span is the _last hour_.
+
+There is no time span limit for the `hb_report` source.
+
+The time period is parsed by the `dateutil` python module. It
+covers a wide range of date formats. For instance:
+
+- 3:00 (today at 3am)
+- 15:00 (today at 3pm)
+- 2010/9/1 2pm (September 1st 2010 at 2pm)
+
+For more examples of valid time/date statements, please refer to the
+`python-dateutil` documentation:
+
+- https://dateutil.readthedocs.org/[dateutil.readthedocs.org]
+
+If the dateutil module is not available, then the time is parsed using
+strptime and only the kind as printed by `date(1)` is allowed:
+
+- Tue Sep 15 20:46:27 CEST 2010
+
+Usage:
+...............
+limit [<from_time>] [<to_time>]
+...............
+Examples:
+...............
+limit 10:15
+limit 15h22m 16h
+limit "Sun 5 20:46" "Sun 5 22:00"
+...............
+
+[[cmdhelp_history_log,log content]]
+==== `log`
+
+Show messages logged on one or more nodes. Leaving out a node
+name produces combined logs of all nodes. Messages are sorted by
+time and, if the terminal emulation supports it, displayed in
+different colours depending on the node to allow for easier
+reading.
+
+The sorting key is the timestamp as written by syslog, which
+normally has a maximum resolution of one second. Obviously,
+messages generated by events which share the same timestamp may
+not be sorted in the order in which they actually happened. Such
+close events may actually happen fairly often.
+
+Usage:
+...............
+log [<node> [<node> ...] ]
+...............
+Example:
+...............
+log node-a
+...............
+
+[[cmdhelp_history_node,node events]]
+==== `node`
+
+Show important events that happened on a node. Important events
+are node lost and join, standby and online, and fence. Use either
+node names or extended regular expressions.
+
+Usage:
+...............
+node <node> [<node> ...]
+...............
+Example:
+...............
+node node1
+...............
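+
+Since extended regular expressions are accepted, several nodes may
+be matched at once (the pattern below is illustrative):
+...............
+node node[12]
+...............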
+
+[[cmdhelp_history_peinputs,list or get PE input files]]
+==== `peinputs`
+
+Every event in the cluster results in generating one or more
+Policy Engine (PE) files. These files describe future motions of
+resources. The files are listed as full paths in the current
+report directory. Add +v+ to also see the creation time stamps.
+
+Usage:
+...............
+peinputs [{<range>|<number>} ...] [v]
+
+range :: <n1>:<n2>
+...............
+Example:
+...............
+peinputs
+peinputs 440:444 446
+peinputs v
+...............
+
+[[cmdhelp_history_refresh,refresh live report]]
+==== `refresh`
+
+This command makes sense only for the +live+ source and makes
+`crm` collect the latest logs and other relevant information from
+the logs. If you want to make a completely new report, specify
++force+.
+
+Usage:
+...............
+refresh [force]
+...............
+
+[[cmdhelp_history_resource,resource events]]
+==== `resource`
+
+Show actions and any failures that happened on all specified
+resources on all nodes. Normally, one gives resource names as
+arguments, but it is also possible to use extended regular
+expressions. Note that group, clone, and master/slave
+names are never logged. The resource command is going to expand
+all of these appropriately, so that clone instances or resources
+which are part of a group are shown.
+
+Usage:
+...............
+resource <rsc> [<rsc> ...]
+...............
+Example:
+...............
+resource bigdb public_ip
+resource my_.*_db2
+resource ping_clone
+...............
+
+[[cmdhelp_history_session,manage history sessions]]
+==== `session`
+
+Sometimes you may want to get back to examining a particular
+history period or bug report. In order to make that easier, the
+current settings can be saved and later retrieved.
+
+If the history currently being examined comes from a live
+cluster, the logs, PE inputs, and other files are saved too,
+because they may disappear from the nodes. For existing reports
+coming from `hb_report`, only the directory location is saved
+(so as not to waste space).
+
+A history session may also be packed into a tarball which can
+then be sent to support.
+
+Leave out the subcommand to see the current session.
+
+Usage:
+...............
+session [{save|load|delete} <name> | pack [<name>] | update | list]
+...............
+Examples:
+...............
+session save bnc966622
+session load rsclost-2
+session list
+...............
+
+[[cmdhelp_history_setnodes,set the list of cluster nodes]]
+==== `setnodes`
+
+If the host this program runs on is not part of the cluster,
+it is necessary to set the list of nodes.
+
+Usage:
+...............
+setnodes node <node> [<node> ...]
+...............
+Example:
+...............
+setnodes node_a node_b
+...............
+
+[[cmdhelp_history_show,show status or configuration of the PE input file]]
+==== `show`
+
+Every transition is saved as a PE file. Use this command to
+render that PE file either as configuration or status. The
+configuration output is the same as `crm configure show`.
+
+Usage:
+...............
+show <pe> [status]
+
+pe :: <number>|<index>|<file>|live
+...............
+Examples:
+...............
+show 2066
+show pe-input-2080.bz2 status
+...............
+
+[[cmdhelp_history_source,set source to be examined]]
+==== `source`
+
+Events to be examined can come from the current cluster or from a
+`hb_report` report. This command sets the source. `source live`
+sets source to the running cluster and system logs. If no source
+is specified, the current source information is printed.
+
+If a report source is specified as a file reference, the file
+is unpacked in the directory where it resides. This directory
+is not removed on exit.
+
+Usage:
+...............
+source [<dir>|<file>|live]
+...............
+Examples:
+...............
+source live
+source /tmp/customer_case_22.tar.bz2
+source /tmp/customer_case_22
+source
+...............
+
+[[cmdhelp_history_transition,show transition]]
+==== `transition`
+
+This command will print actions planned by the PE and run
+graphviz (`dotty`) to display a graphical representation of the
+transition. Of course, for the latter an X11 session is required.
+This command invokes `ptest(8)` in the background.
+
+The +showdot+ subcommand runs graphviz (`dotty`) to display a
+graphical representation of the +.dot+ file which has been
+included in the report. Essentially, it shows the calculation
+produced by `pengine` which is installed on the node where the
+report was produced. In the optimal case, this output should not
+differ from the one produced by the locally installed `pengine`.
+
+The `log` subcommand shows the full log for the duration of the
+transition.
+
+A transition can also be saved to a CIB shadow for further
+analysis or use with `cib` or `configure` commands (use the
+`save` subcommand). The shadow file name defaults to the name of
+the PE input file.
+
+If the PE input file number is not provided, it defaults to the
+last one, i.e. the last transition. The last transition can also
+be referenced with number 0. If the number is negative, then the
+corresponding transition relative to the last one is chosen.
+
+If there are warning and error PE input files or different nodes
+were the DC in the observed timeframe, it may happen that PE
+input file numbers collide. In that case, provide some unique part
+of the path to the file.
+
+After the `ptest` output, logs about events that happened during
+the transition are printed.
+
+The `tags` subcommand scans the logs for the transition and returns a
+list of key events during that transition. For example, the tag
++error+ will be returned if there are any errors logged during the
+transition.
+
+Usage:
+...............
+transition [<number>|<index>|<file>] [nograph] [v...] [scores] [actions] [utilization]
+transition showdot [<number>|<index>|<file>]
+transition log [<number>|<index>|<file>]
+transition save [<number>|<index>|<file> [name]]
+transition tags [<number>|<index>|<file>]
+...............
+Examples:
+...............
+transition
+transition 444
+transition -1
+transition pe-error-3.bz2
+transition node-a/pengine/pe-input-2.bz2
+transition showdot 444
+transition log
+transition save 0 enigma-22
+...............
+
+[[cmdhelp_history_transitions,List transitions]]
+==== `transitions`
+
+A transition represents a change in cluster configuration or
+state. This command lists the transitions in the current timeframe.
+
+Usage:
+...............
+transitions
+...............
+Example:
+...............
+transitions
+...............
+
+
+[[cmdhelp_history_wdiff,cluster states/transitions difference]]
+==== `wdiff`
+
+A transition represents a change in cluster configuration or
+state. Use `wdiff` to see what has changed between two
+transitions as word differences on a line-by-line basis.
+
+If you want to specify the current cluster configuration and
+status, use the string +live+.
+
+Normally, the first transition specified should be the one which
+is older, but we are not going to enforce that.
+
+Note that a single configuration update may result in more than
+one transition.
+
+Usage:
+...............
+wdiff <pe> <pe> [status]
+
+pe :: <number>|<index>|<file>|live
+...............
+Examples:
+...............
+wdiff 2066 2067
+wdiff pe-input-2080.bz2 live status
+...............
+
+[[cmdhelp_root_report,Create cluster status report]]
+=== `report`
+
+Interface to a tool for creating a cluster report. A report is an
+archive containing log files, configuration files, system information
+and other relevant data for a given time period. This is a useful tool
+for collecting data to attach to bug reports, or for detecting the
+root cause of errors resulting in resource failover, for example.
+
+See `crmsh_hb_report(8)` for more details on arguments,
+or call `crm report -h`.
+
+Usage:
+...............
+report -f {time|"cts:"testnum} [-t time] [-u user] [-l file]
+ [-n nodes] [-E files] [-p patt] [-L patt] [-e prog]
+ [-MSDZAVsvhd] [dest]
+...............
+
+Examples:
+...............
+report -f 2pm report_1
+report -f "2007/9/5 12:30" -t "2007/9/5 14:00" report_2
+report -f 1:00 -t 3:00 -l /var/log/cluster/ha-debug report_3
+report -f "09sep07 2:00" -u hbadmin report_4
+report -f 18:00 -p "usern.*" -p "admin.*" report_5
+report -f cts:133 ctstest_133
+...............
+
+=== `end` (`cd`, `up`)
+
+The `end` command exits the current level and moves the user to
+the parent level. This command is available everywhere.
+
+Usage:
+...............
+end
+...............
+
+=== `help`
+
+The `help` command prints help for the current level or for the
+specified topic (command). This command is available everywhere.
+
+Usage:
+...............
+help [<topic>]
+...............
+
+=== `quit` (`exit`, `bye`)
+
+Leave the program.
+
+BUGS
+----
+Even though all sensible configurations (and most of those that
+are not) are going to be supported by the crm shell, I suspect
+that certain XML constructs may still confuse the tool. When that
+happens, please file a bug report.
+
+The crm shell will not try to update the objects it does not
+understand. Of course, it is always possible to edit such objects
+in the XML format.
+
+AUTHORS
+-------
+Dejan Muhamedagic, <dejan@suse.de>
+Kristoffer Gronlund <kgronlund@suse.com>
+and many OTHERS
+
+SEE ALSO
+--------
+crm_resource(8), crm_attribute(8), crm_mon(8), cib_shadow(8),
+ptest(8), dotty(1), crm_simulate(8), cibadmin(8)
+
+
+COPYING
+-------
+Copyright \(C) 2008-2013 Dejan Muhamedagic.
+Copyright \(C) 2013 Kristoffer Gronlund.
+
+Free use of this software is granted under the terms of the GNU General Public License (GPL).
+
+//////////////////////
+ vim:ts=4:sw=4:expandtab:
+//////////////////////
diff --git a/doc/website-v1/man-4.3.adoc b/doc/website-v1/man-4.3.adoc
new file mode 100644
index 0000000..2b82298
--- /dev/null
+++ b/doc/website-v1/man-4.3.adoc
@@ -0,0 +1,5160 @@
+:man source: crm
+:man version: 4.0.0
+:man manual: crmsh documentation
+
+crm(8)
+======
+
+NAME
+----
+crm - Pacemaker command line interface for configuration and management
+
+
+SYNOPSIS
+--------
+*crm* [OPTIONS] [SUBCOMMAND ARGS...]
+
+
+[[topics_Description,Program description]]
+DESCRIPTION
+-----------
+The `crm` shell is a command-line based cluster configuration and
+management tool. Its goal is to assist as much as possible with the
+configuration and maintenance of Pacemaker-based High Availability
+clusters.
+
+For more information on Pacemaker itself, see http://clusterlabs.org/.
+
+`crm` works both as a command-line tool to be called directly from the
+system shell, and as an interactive shell with extensive tab
+completion and help.
+
+The primary focus of the `crm` shell is to provide a simplified and
+consistent interface to Pacemaker, but it also provides tools for
+managing the creation and configuration of High Availability clusters
+from scratch. To learn more about this aspect of `crm`, see the
+`cluster` section below.
+
+The `crm` shell can be used to manage every aspect of configuring and
+maintaining a cluster. It provides a simplified line-based syntax on
+top of the XML configuration format used by Pacemaker, commands for
+starting and stopping resources, tools for exploring the history of a
+cluster including log scraping and a set of cluster scripts useful for
+automating the setup and installation of services on the cluster
+nodes.
+
+The `crm` shell is line-oriented: every command must start and finish
+on the same line. It is possible to use a continuation character (+\+)
+to split one command over two or more lines. The continuation
+character is commonly used when displaying configurations.
+
+[[topics_CommandLine,Command line options]]
+OPTIONS
+-------
+*-f, --file*='FILE'::
+ Load commands from the given file. If a dash +-+ is used in place
+ of a file name, `crm` will read commands from the shell standard
+ input (`stdin`).
+
+*-c, --cib*='CIB'::
+ Start the session using the given shadow CIB file.
+ Equivalent to +cib use <CIB>+.
+
+*-D, --display=*'OUTPUT_TYPE'::
+ Choose one of the output options: +plain+, +color-always+, +color+,
+ or +uppercase+. The default is +color+ if the terminal emulation
+ supports colors. Otherwise, +plain+ is used.
+
+*-F, --force*::
+ Make `crm` proceed with applying changes where it would normally
+ ask the user to confirm before proceeding. This option is mainly
+ useful in scripts, and should be used with care.
+
+*-w, --wait*::
+ Make `crm` wait for the cluster transition to finish (for the
+ changes to take effect) after each processed line.
+
+*-H, --history*='DIR|FILE|SESSION'::
+ A directory or file containing a cluster report to load
+ into the `history` commands, or the name of a previously
+ saved history session.
+
+*-h, --help*::
+ Print help page.
+
+*--version*::
+ Print crmsh version and build information (Mercurial Hg changeset
+ hash).
+
+*-d, --debug*::
+ Print verbose debugging information.
+
+*-R, --regression-tests*::
+ Enables extra verbose trace logging used by the regression
+ tests. Logs all external calls made by crmsh.
+
+*--scriptdir*='DIR'::
+ Extra directory where crm looks for cluster scripts, or a list of
+ directories separated by semi-colons (e.g. +/dir1;/dir2;etc.+).
+
+*-o, --opt*='OPTION=VALUE'::
+ Set crmsh option temporarily. If the options are saved using
+ +options save+ then the value passed here will also be saved.
+ Multiple options can be set by using +-o+ multiple times.
+
+[[topics_Introduction,Introduction]]
+== Introduction
+
+This section of the user guide covers general topics about the user
+interface and describes some of the features of `crmsh` in detail.
+
+[[topics_Introduction_Interface,User interface]]
+=== User interface
+
+The main purpose of `crmsh` is to provide a simple yet powerful
+interface to the cluster stack. There are two main modes of operation
+with the user interface of `crmsh`:
+
+* Command line (single-shot) use - Use `crm` as a regular UNIX command
+ from your usual shell. `crm` has full bash completion built in, so
+ using it in this manner should be as comfortable and familiar as
+ using any other command-line tool.
+
+* Interactive mode - By calling `crm` without arguments, or by calling
+ it with only a sublevel as argument, `crm` enters the interactive
+ mode. In this mode, it acts as its own command shell, which
+ remembers which sublevel you are currently in and allows for rapid
+ and convenient execution of multiple commands within the same
+ sublevel. This mode also has full tab completion, as well as
+ built-in interactive help and syntax highlighting.
+
+Here are a few examples of using `crm` both as a command-line tool and
+as an interactive shell:
+
+.Command line (one-shot) use:
+........
+# crm resource stop www_app
+........
+
+.Interactive use:
+........
+# crm
+crm(live)# resource
+crm(live)resource# unmanage tetris_1
+crm(live)resource# up
+crm(live)# node standby node4
+........
+
+.Cluster configuration:
+........
+# crm configure<<EOF
+ #
+ # resources
+ #
+ primitive disk0 iscsi \
+ params portal=192.168.2.108:3260 target=iqn.2008-07.com.suse:disk0
+ primitive fs0 Filesystem \
+ params device=/dev/disk/by-label/disk0 directory=/disk0 fstype=ext3
+ primitive internal_ip IPaddr params ip=192.168.1.101
+ primitive apache apache \
+ params configfile=/disk0/etc/apache2/site0.conf
+ primitive apcfence stonith:apcsmart \
+ params ttydev=/dev/ttyS0 hostlist="node1 node2" \
+ op start timeout=60s
+ primitive pingd pingd \
+ params name=pingd dampen=5s multiplier=100 host_list="r1 r2"
+ #
+ # monitor apache and the UPS
+ #
+ monitor apache 60s:30s
+ monitor apcfence 120m:60s
+ #
+ # cluster layout
+ #
+ group internal_www \
+ disk0 fs0 internal_ip apache
+ clone fence apcfence \
+ meta globally-unique=false clone-max=2 clone-node-max=1
+ clone conn pingd \
+ meta globally-unique=false clone-max=2 clone-node-max=1
+ location node_pref internal_www \
+ rule 50: #uname eq node1 \
+ rule pingd: defined pingd
+ #
+ # cluster properties
+ #
+ property stonith-enabled=true
+ commit
+EOF
+........
+
+The `crm` interface is hierarchical, with commands organized into
+separate levels by functionality. To list the available levels and
+commands, execute +help <level>+ or, at the top level of the
+shell, simply type `help` to get an overview of all available
+levels and commands.
+
+The +(live)+ string in the `crm` prompt signifies that the current CIB
+in use is the live cluster configuration. It is also possible to
+work with so-called <<topics_Features_Shadows,shadow CIBs>>. These are
+separate, inactive configurations stored in files which can be applied
+at any time, thereby replacing the live configuration.
+
+[[topics_Introduction_Completion,Tab completion]]
+=== Tab completion
+
+The `crm` shell makes extensive use of tab completion. The completion
+is both static (i.e. for `crm` commands) and dynamic. The latter
+takes into account the current status of the cluster or
+information from installed resource agents. Sometimes, completion
+may also be used to get short help on resource parameters. Here
+are a few examples:
+
+...............
+crm(live)resource# <TAB><TAB>
+ban demote maintenance param scores trace
+cd failcount manage promote secret unmanage
+cleanup help meta quit start untrace
+clear locate move refresh status up
+constraints ls operations restart stop utilization
+
+crm(live)configure# primitive fence-1 <TAB><TAB>
+lsb: ocf: service: stonith: systemd:
+
+crm(live)configure# primitive fence-1 stonith:<TAB><TAB>
+apcmaster external/ippower9258 fence_legacy
+apcmastersnmp external/kdumpcheck ibmhmc
+apcsmart external/libvirt ipmilan
+
+crm(live)configure# primitive fence-1 stonith:ipmilan params <TAB><TAB>
+auth= hostname= ipaddr= login= password= port= priv=
+
+crm(live)configure# primitive fence-1 stonith:ipmilan params auth=<TAB><TAB>
+auth* (string)
+ The authorization type of the IPMI session ("none", "straight", "md2", or "md5")
+...............
+
+`crmsh` also comes with bash completion usable directly from the
+system shell. This should be installed automatically with the command
+itself.
+
+[[topics_Introduction_Shorthand,Shorthand syntax]]
+=== Shorthand syntax
+
+When using the `crm` shell to manage clusters, you will end up typing
+a lot of commands many times over. Clear command names like
++configure+ help in understanding and learning to use the cluster
+shell, but are easy to misspell and tedious to type repeatedly. The
+interactive mode and tab completion both help with this, but the `crm`
+shell also has the ability to understand a variety of shorthand
+aliases for all of the commands.
+
+For example, instead of typing `crm status`, you can type `crm st` or
+`crm stat`. Instead of `crm configure` you can type `crm cfg` or even
+`crm cf`. `crm resource` can be shortened to `crm rsc`, and so on.
+
+The exact list of accepted aliases is too long to print in full, but
+experimentation and typos should help in discovering more of them.
+
+[[topics_Features,Features]]
+== Features
+
+The feature set of crmsh covers a wide range of functionality, and
+understanding how and when to use the various features of the shell
+can be difficult. This section of the guide describes some of the
+features and use cases of `crmsh` in more depth. The intention is to
+provide a deeper understanding of these features, but also to serve as
+a guide to using them.
+
+[[topics_Features_Shadows,Shadow CIB usage]]
+=== Shadow CIB usage
+
+A shadow CIB is a normal cluster configuration stored in a file.
+Shadow CIBs may be manipulated in much the same way as the _live_
+CIB, with the key difference that changes to a shadow CIB have no
+effect on the actual cluster resources. An administrator may choose
+to apply any of them to the cluster, thus replacing the running
+configuration with the one found in the shadow CIB.
+
+The `crm` prompt always contains the name of the configuration which
+is currently in use, or the string _live_ if using the live cluster
+configuration.
+
+When editing the configuration in the `configure` level, no changes
+are actually applied until the `commit` command is executed. It is
+possible to start editing a configuration as usual, but instead of
+committing the changes to the active CIB, save them to a shadow CIB.
+
+The following example `configure` session demonstrates how this can be
+done:
+...............
+crm(live)configure# cib new test-2
+INFO: test-2 shadow CIB created
+crm(test-2)configure# commit
+...............
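+
+Once the changes are ready, the shadow CIB can be applied to the
+cluster from the `cib` level (a sketch continuing the session above):
+...............
+crm(test-2)configure# cib use
+crm(live)configure# cib commit test-2
+...............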
+
+[[topics_Features_Checks,Configuration semantic checks]]
+=== Configuration semantic checks
+
+Resource definitions may be checked against the meta-data
+provided with the resource agents. The following checks are
+currently carried out:
+
+- whether all required parameters are set
+- whether the defined parameters actually exist
+- whether operation timeout values are long enough
+
+The parameter checks are obvious and need no further explanation.
+Failures in these checks are treated as configuration errors.
+
+The timeouts for operations should be at least as long as those
+recommended in the meta-data. Too short timeout values are a
+common mistake in cluster configurations and, even worse, they
+often slip through if cluster testing was not thorough. Though
+operation timeout issues are treated as warnings, make sure that
+the timeouts are usable in your environment. Note also that the
+values given are just an _advisory minimum_: your resources may
+require longer timeouts.
+
+Users may tune the frequency of checks and the treatment of errors
+by the <<cmdhelp_options_check-frequency,`check-frequency`>> and
+<<cmdhelp_options_check-mode,`check-mode`>> preferences.
+
+Note that if the +check-frequency+ is set to +always+ and the
++check-mode+ to +strict+, errors are not tolerated and such a
+configuration cannot be saved.
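+
+For example, to run the checks on every configuration change and
+treat any failed check as a hard error, the corresponding options
+could be set like this (a short sketch using the `options` level):
+...............
+crm(live)# options check-frequency always
+crm(live)# options check-mode strict
+...............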
+
+[[topics_Features_Templates,Configuration templates]]
+=== Configuration templates
+
+.Deprecation note
+****************************
+Configuration templates have been deprecated in favor of the more
+capable `cluster scripts`. To learn how to use cluster scripts, see
+the dedicated documentation on the `crmsh` website at
+http://crmsh.github.io/, or in the <<cmdhelp_script,Script section>>.
+****************************
+
+Configuration templates are ready-made configurations created by
+cluster experts. They are designed in such a way that users
+may generate valid cluster configurations with minimum effort.
+If you are new to Pacemaker, templates may be the best way to
+start.
+
+We will show here how to create a simple yet functional Apache
+configuration:
+...............
+# crm configure
+crm(live)configure# template
+crm(live)configure template# list templates
+apache filesystem virtual-ip
+crm(live)configure template# new web <TAB><TAB>
+apache filesystem virtual-ip
+crm(live)configure template# new web apache
+INFO: pulling in template apache
+INFO: pulling in template virtual-ip
+crm(live)configure template# list
+web2-d web2 vip2 web3 vip web
+...............
+
+We enter the `template` level from `configure`. Use the `list`
+command to show templates available on the system. The `new`
+command creates a configuration from the +apache+ template. You
+can use tab completion to pick templates. Note that the apache
+template depends on a virtual IP address which is automatically
+pulled along. The `list` command shows the just created +web+
+configuration, among other configurations (I hope that you,
+unlike me, will use more sensible and descriptive names).
+
+The `show` command, which displays the resulting configuration,
+may be used to get an idea about the minimum required changes
+which have to be done. All +ERROR+ messages show the line numbers
+in which the respective parameters are to be defined:
+...............
+crm(live)configure template# show
+ERROR: 23: required parameter ip not set
+ERROR: 61: required parameter id not set
+ERROR: 65: required parameter configfile not set
+crm(live)configure template# edit
+...............
+
+The `edit` command invokes the preferred text editor with the
++web+ configuration. At the top of the file, the user is advised
+how to make changes. A good template should require the user
+to specify only parameters. For example, the +web+ configuration
+we created above has the following required and optional
+parameters (all parameter lines start with +%%+):
+...............
+$ grep -n ^%% ~/.crmconf/web
+23:%% ip
+31:%% netmask
+35:%% lvs_support
+61:%% id
+65:%% configfile
+71:%% options
+76:%% envfiles
+...............
+
+These lines are the only ones that should be modified. Simply
+append the parameter value at the end of the line. For instance,
+after editing this template, the result could look like this (we
+used tabs instead of spaces to make the values stand out):
+...............
+$ grep -n ^%% ~/.crmconf/web
+23:%% ip 192.168.1.101
+31:%% netmask
+35:%% lvs_support
+61:%% id websvc
+65:%% configfile /etc/apache2/httpd.conf
+71:%% options
+76:%% envfiles
+...............
+
+As you can see, the parameter line format is very simple:
+...............
+%% <name> <value>
+...............
+
+After editing the file, use `show` again to display the
+configuration:
+...............
+crm(live)configure template# show
+primitive virtual-ip IPaddr \
+ params ip=192.168.1.101
+primitive apache apache \
+ params configfile="/etc/apache2/httpd.conf"
+monitor apache 120s:60s
+group websvc \
+ apache virtual-ip
+...............
+
+The target resource of the apache template is a group which we
+named +websvc+ in this sample session.
+
+This configuration looks exactly as if you had typed it at the
+`configure` level. The point of templates is to save you some
+typing. It is important, however, to understand the configuration
+produced.
+
+Finally, the configuration may be applied to the current
+crm configuration (note how the configuration changed slightly,
+though it is still equivalent, after being digested at the
+`configure` level):
+...............
+crm(live)configure template# apply
+crm(live)configure template# cd ..
+crm(live)configure# show
+node xen-b
+node xen-c
+primitive apache apache \
+ params configfile="/etc/apache2/httpd.conf" \
+ op monitor interval=120s timeout=60s
+primitive virtual-ip IPaddr \
+ params ip=192.168.1.101
+group websvc apache virtual-ip
+...............
+
+Note that this still does not commit the configuration to the CIB
+which is used in the shell, either the running one (+live+) or
+some shadow CIB. For that you still need to execute the `commit`
+command.
+
+To complete our example, we should also define the preferred node
+to run the service:
+
+...............
+crm(live)configure# location websvc-pref websvc 100: xen-b
+...............
+
+If you are not happy with some resource names which are provided
+by default, you can rename them now:
+
+...............
+crm(live)configure# rename virtual-ip intranet-ip
+crm(live)configure# show
+node xen-b
+node xen-c
+primitive apache apache \
+ params configfile="/etc/apache2/httpd.conf" \
+ op monitor interval=120s timeout=60s
+primitive intranet-ip IPaddr \
+ params ip=192.168.1.101
+group websvc apache intranet-ip
+location websvc-pref websvc 100: xen-b
+...............
+
+To summarize, working with templates typically consists of the
+following steps:
+
+- `new`: create a new configuration from templates
+- `edit`: define parameters, at least the required ones
+- `show`: see if the configuration is valid
+- `apply`: apply the configuration to the `configure` level
+
+[[topics_Features_Testing,Resource testing]]
+=== Resource testing
+
+The amount of detail in a cluster makes all configurations prone
+to errors. By far the largest number of issues in a cluster is
+due to bad resource configuration. The shell can help quickly
+diagnose such problems, and considerably reduce your keyboard
+wear.
+
+Let's say that we entered the following configuration:
+...............
+node xen-b
+node xen-c
+node xen-d
+primitive fencer stonith:external/libvirt \
+ params hypervisor_uri="qemu+tcp://10.2.13.1/system" \
+ hostlist="xen-b xen-c xen-d" \
+ op monitor interval=2h
+primitive svc Xinetd \
+ params service=systat \
+ op monitor interval=30s
+primitive intranet-ip IPaddr2 \
+ params ip=10.2.13.100 \
+ op monitor interval=30s
+primitive apache apache \
+ params configfile="/etc/apache2/httpd.conf" \
+ op monitor interval=120s timeout=60s
+group websvc apache intranet-ip
+location websvc-pref websvc 100: xen-b
+...............
+
+Before typing `commit` to submit the configuration to the CIB, we
+can make sure that all resources are usable on all nodes:
+...............
+crm(live)configure# rsctest websvc svc fencer
+...............
+
+It is important that the resources being tested are not running on
+any nodes. Otherwise, the `rsctest` command will refuse to do
+anything. Of course, if the current configuration resides in a
+shadow CIB, then a `commit` is irrelevant; the point is simply that
+the resources must not be running on any node.
+
+.Note on stopping all resources
+****************************
+Alternatively to not committing a configuration, it is also
+possible to tell Pacemaker not to start any resources:
+
+...............
+crm(live)configure# property stop-all-resources=yes
+...............
+This stops almost all resources: resources of class stonith are still
+started. However, the shell is not as strict when it comes to stonith
+resources.
+****************************
+
+The order of resources is significant insofar as a resource depends
+on all resources to its left. In most configurations, it's
+probably practical to test resources in several runs, based on
+their dependencies.
+
+Apart from groups, `crm` does not interpret constraints and
+therefore knows nothing about resource dependencies. It also
+doesn't know if a resource can run on a node at all in case of an
+asymmetric cluster. It is up to the user to specify a list of
+eligible nodes if a resource is not meant to run on every node.
+
+[[topics_Features_Security,Access Control Lists (ACL)]]
+=== Access Control Lists (ACL)
+
+.Note on ACLs in Pacemaker 1.1.12
+****************************
+The support for ACLs has been revised in Pacemaker version 1.1.12 and
+up. Depending on which version you are using, the information in this
+section may no longer be accurate. Look for the `acl_target`
+configuration element for more details on the new syntax.
+****************************
+
+By default, the users from the +haclient+ group have full access
+to the cluster (or, more precisely, to the CIB). Access control
+lists allow for finer access control to the cluster.
+
+Access control lists consist of an ordered set of access rules.
+Each rule allows read or write access or denies access
+completely. Rules are typically combined to produce a specific
+role. Then, users may be assigned a role.
+
+For instance, this is a role which defines a set of rules
+allowing management of a single resource:
+
+...............
+role bigdb_admin \
+ write meta:bigdb:target-role \
+ write meta:bigdb:is-managed \
+ write location:bigdb \
+ read ref:bigdb
+...............
+
+The first two rules allow modifying the +target-role+ and
++is-managed+ meta attributes which effectively enables users in
+this role to stop/start and manage/unmanage the resource. The
+constraints write access rule allows moving the resource around.
+Finally, the user is granted read access to the resource
+definition.
+
+For proper operation of all Pacemaker programs, it is advisable
+to add the following role to all users:
+
+...............
+role read_all \
+ read cib
+...............
+
+For finer-grained read access, try the rules listed in the
+following role:
+
+...............
+role basic_read \
+ read node attribute:uname \
+ read node attribute:type \
+ read property \
+ read status
+...............
+
+It is however possible that some Pacemaker programs (e.g.
+`ptest`) may not function correctly if the whole CIB is not
+readable.
+
+Some of the ACL rules in the examples above are expanded by the
+shell to XPath specifications. For instance,
++meta:bigdb:target-role+ expands to:
+
+........
+//primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']
+........
+
+You can see the expansion by showing XML:
+
+...............
+crm(live)configure# show xml bigdb_admin
+...
+<acls>
+ <acl_role id="bigdb_admin">
+ <write id="bigdb_admin-write"
+ xpath="//primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']"/>
+...............
+
+Many different XPath expressions can have equal meaning. For
+instance, the following two are equal, but only the first one is
+going to be recognized as a shortcut:
+
+...............
+//primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']
+//resources/primitive[@id='bigdb']/meta_attributes/nvpair[@name='target-role']
+...............
+
+XPath is a powerful language, but you should try to keep your ACL
+XPath expressions simple, using the builtin shortcuts whenever
+possible.
+
+[[topics_Features_Resourcesets,Syntax: Resource sets]]
+=== Syntax: Resource sets
+
+Using resource sets can be a bit confusing unless one knows the
+details of the implementation in Pacemaker as well as how to interpret
+the syntax provided by `crmsh`.
+
+Three different types of resource sets are provided by `crmsh`, and
+each one implies different values for the two resource set attributes,
++sequential+ and +require-all+.
+
++sequential+::
+ If false, the resources in the set do not depend on each other
+ internally. Setting +sequential+ to +true+ implies a strict order of
+ dependency within the set.
+
++require-all+::
+ If false, only one resource in the set is required to fulfil the
+ requirements of the set. The set of A, B and C with +require-all+
+ set to +false+ is read as "A OR B OR C" when its dependencies
+ are resolved.
+
+The three types of resource sets modify the attributes in the
+following way:
+
+1. Implicit sets (no brackets). +sequential=true+, +require-all=true+
+2. Parenthesis set (+(+ ... +)+). +sequential=false+, +require-all=true+
+3. Bracket set (+[+ ... +]+). +sequential=false+, +require-all=false+
+
+To create a set with the properties +sequential=true+ and
++require-all=false+, explicitly set +sequential+ in a bracketed set,
++[ A B C sequential=true ]+.
+
+To create multiple sets with both +sequential+ and +require-all+ set to
+true, explicitly set +sequential+ in a parenthesis set:
++A B ( C D sequential=true )+.
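+
+As an illustration, the three set types might appear in order
+constraints as follows (a sketch; resource and constraint names are
+arbitrary):
+...............
+# implicit set: C depends on B, and B depends on A
+order o1 Mandatory: A B C
+# parenthesis set: B and C are unordered relative to each other
+order o2 Mandatory: A ( B C )
+# bracket set: only one of B or C is required
+order o3 Mandatory: A [ B C ]
+...............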
+
+[[topics_Features_AttributeListReferences,Syntax: Attribute list references]]
+=== Syntax: Attribute list references
+
+Attribute lists are used to set attributes and parameters for
+resources, constraints and property definitions. For example, to set
+the virtual IP used by an +IPaddr2+ resource, the attribute +ip+ can be
+set in an attribute list for that resource.
+
+Attribute lists can have identifiers that name them, and other
+resources can reuse the same attribute list by referring to that name
+using an +$id-ref+. For example, the following statement defines a
+simple dummy resource with an attribute list which sets the parameter
++state+ to the value 1 and sets the identifier for the attribute list
+to +on-state+:
+
+..............
+primitive dummy-1 Dummy params $id=on-state state=1
+..............
+
+To refer to this attribute list from a different resource, refer to
+the +on-state+ name using an id-ref:
+
+..............
+primitive dummy-2 Dummy params $id-ref=on-state
+..............
+
+The resource +dummy-2+ will now also have the parameter +state+ set to the value 1.
+
+[[topics_Features_AttributeReferences,Syntax: Attribute references]]
+=== Syntax: Attribute references
+
+In some cases, referencing complete attribute lists is too
+coarse-grained, for example if two different parameters with different
+names should have the same value set. Instead of having to copy the
+value in multiple places, it is possible to create references to
+individual attributes in attribute lists.
+
+To name an attribute in order to be able to refer to it later, prefix
+the attribute name with a +$+ character (as seen above with the
+special names +$id+ and +$id-ref+):
+
+............
+primitive dummy-1 Dummy params $state=1
+............
+
+The identifier +state+ can now be used to refer to this attribute from other
+primitives, using the +@<id>+ syntax:
+
+............
+primitive dummy-2 Dummy params @state
+............
+
+In some cases, using the attribute name as the identifier doesn't work
+due to name clashes. In this case, the syntax +$<id>:<name>=<value>+
+can be used to give the attribute a different identifier:
+
+............
+primitive dummy-1 params $dummy-state-on:state=1
+primitive dummy-2 params @dummy-state-on
+............
+
+There is also the possibility that two resources both use the same
+attribute value but with different names. For example, a web server
+may have a parameter +server_ip+ for setting the IP address where it
+listens for incoming requests, and a virtual IP resource may have a
+parameter called +ip+ which sets the IP address it creates. To
+configure these two resources with an IP without repeating the value,
+the reference can be given a name using the syntax +@<id>:<name>+.
+
+Example:
+............
+primitive virtual-ip IPaddr2 params $vip:ip=192.168.1.100
+primitive webserver apache params @vip:server_ip
+............
+
+[[topics_Syntax_RuleExpressions,Syntax: Rule expressions]]
+=== Syntax: Rule expressions
+
+Many of the configuration commands in `crmsh` now support the use of
+_rule expressions_, which can influence what attributes apply to a
+resource or under which conditions a constraint is applied, depending
+on changing conditions like date, time, the value of attributes and
+more.
+
+Here is an example of a simple rule expression used to apply a
+different resource parameter on the node named `node1`:
+
+..............
+primitive my_resource Special \
+ params 2: rule #uname eq node1 interface=eth1 \
+ params 1: interface=eth0
+..............
+
+This primitive resource has two lists of parameters with descending
+priority. The parameter list with the highest priority is applied
+first, but only if the rule expressions for that parameter list all
+apply. In this case, the rule `#uname eq node1` limits the parameter
+list so that it is only applied on `node1`.
+
+Note that rule expressions are not terminated and are immediately
+followed by the data to which the rule is applied. In this case, the
+name-value pair `interface=eth1`.
+
+Rule expressions can contain multiple expressions connected using the
+boolean operators `or` and `and`. The full syntax for rule expressions
+is listed below.
+
+..............
+rules ::
+ rule [id_spec] [$role=<role>] <score>: <expression>
+ [rule [id_spec] [$role=<role>] <score>: <expression> ...]
+
+id_spec :: $id=<id> | $id-ref=<id>
+score :: <number> | <attribute> | [-]inf
+expression :: <simple_exp> [<bool_op> <simple_exp> ...]
+bool_op :: or | and
+simple_exp :: <attribute> [type:]<binary_op> <value>
+ | <unary_op> <attribute>
+ | date <date_expr>
+type :: <string> | <version> | <number>
+binary_op :: lt | gt | lte | gte | eq | ne
+unary_op :: defined | not_defined
+
+date_expr :: lt <end>
+ | gt <start>
+ | in start=<start> end=<end>
+ | in start=<start> <duration>
+ | spec <date_spec>
+duration|date_spec ::
+ hours=<value>
+ | monthdays=<value>
+ | weekdays=<value>
+ | yeardays=<value>
+ | months=<value>
+ | weeks=<value>
+ | years=<value>
+ | weekyears=<value>
+ | moon=<value>
+..............
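+
+For instance, following the grammar above, a date expression can
+restrict a location preference to office hours (a sketch; resource
+and constraint names are arbitrary):
+...............
+location websvc-hours websvc \
+    rule 100: date spec hours=9-16 weekdays=1-5
+...............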
+
+[[topics_Lifetime,Lifetime parameter format]]
+== Lifetime parameter format
+
+Lifetimes can be specified in the ISO 8601 time format or the ISO 8601
+duration format. To distinguish between months and minutes, use the PT
+prefix before specifying minutes. The duration format is one of
++PnYnMnDTnHnMnS+, +PnW+, +P<date>T<time>+.
+
+P = duration. Y = year. M = month. W = week. D = day. T = time. H =
+hour. M = minute. S = second.
+
+Examples:
+.................
+PT5M = 5 minutes later.
+P3D = 3 days later.
+PT1H = 1 hour later.
+.................
+
+The cluster checks lifetimes at an interval defined by the
+cluster-recheck-interval property (default 15 minutes).
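+
+Lifetimes are accepted by commands such as `resource move` and
+`resource ban`. For example, to move a resource away from its
+current node for one hour (resource and node names are illustrative):
+...............
+crm(live)# resource move websvc node2 PT1H
+...............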
+
+
+[[topics_Reference,Command reference]]
+== Command reference
+
+The commands are structured to be compatible with the shell command
+line. Sometimes, the underlying Pacemaker grammar uses characters
+that have special meaning in bash, and these will need to be
+quoted. This includes the hash or pound sign (`#`), single and
+double quotes, and any significant whitespace.
+
+Whitespace is also significant when assigning values, meaning that
++key=value+ is different from +key = value+.
+
+Commands can be referenced using short-hand as long as the short-hand
+is unique. This can be either a prefix of the command name or a prefix
+string of characters found in the name.
+
+For example, +status+ can be abbreviated as +st+ or +su+, and
++configure+ as +conf+ or +cfg+.
+
+The syntax for the commands is given below in an informal, BNF-like
+grammar.
+
+* `<value>` denotes a string.
+* `[value]` means that the construct is optional.
+* The ellipsis (`...`) signifies that the previous construct may be
+ repeated.
+* `first|second` means either first or second.
+* The rest are literals (strings, `:`, `=`).
+
+[[cmdhelp_root_status,Cluster status]]
+=== `status`
+
+Show cluster status. The status is displayed by `crm_mon`. Supply
+additional arguments for more information or a different format.
+See `crm_mon(8)` for more details.
+
+Example:
+...............
+status
+status simple
+status full
+...............
+
+Usage:
+...............
+status [<option> ...]
+
+option :: full
+ | bynode
+ | inactive
+ | ops
+ | timing
+ | failcounts
+ | verbose
+ | quiet
+ | html
+ | xml
+ | simple
+ | tickets
+ | noheaders
+ | detail
+ | brief
+...............
+
+[[cmdhelp_root_verify,Verify cluster status]]
+=== `verify`
+
+Performs basic checks for the cluster configuration and
+current status, reporting potential issues.
+
+See `crm_verify(8)` and `crm_simulate(8)` for more details.
+
+Example:
+...............
+verify
+verify scores
+...............
+
+Usage:
+...............
+verify [scores]
+...............
+
+[[cmdhelp_cluster,Cluster setup and management]]
+=== `cluster` - Cluster setup and management
+
+Whole-cluster configuration management with High Availability
+awareness.
+
+The commands on the cluster level allow configuration and
+modification of the underlying cluster infrastructure, and also
+supply tools to do whole-cluster systems management.
+
+These commands enable easy installation and maintenance of an HA
+cluster, by providing support for package installation, configuration
+of the cluster messaging layer, file system setup and more.
+
+[[cmdhelp_cluster_add,Add a new node to the cluster,From Code]]
+==== `add`
+See "crm cluster help add" or "crm cluster add --help"
+
+[[cmdhelp_cluster_copy,Copy file to other cluster nodes]]
+==== `copy`
+
+Copy file to other cluster nodes.
+
+Copies the given file to all other nodes unless given a
+list of nodes to copy to as argument.
+
+Usage:
+...............
+copy <filename> [nodes ...]
+...............
+
+Example:
+...............
+copy /etc/motd
+...............
+
+[[cmdhelp_cluster_diff,Diff file across cluster]]
+==== `diff`
+
+Displays the difference, if any, between a given file
+on different nodes. If the second argument is `--checksum`,
+a checksum of the file will be calculated and displayed for
+each node.
+
+Usage:
+...............
+diff <file> [--checksum] [nodes...]
+...............
+
+Example:
+...............
+diff /etc/crm/crm.conf node2
+diff /etc/resolv.conf --checksum
+...............
+
+[[cmdhelp_cluster_disable,Disable cluster services]]
+==== `disable`
+
+Disable the cluster-related system services on this node.
+
+Usage:
+...............
+disable
+...............
+
+[[cmdhelp_cluster_enable,Enable cluster services]]
+==== `enable`
+
+Enable the cluster-related system services on this node.
+
+Usage:
+...............
+enable
+...............
+
+[[cmdhelp_cluster_geo_init,Configure cluster as geo cluster,From Code]]
+==== `geo-init`
+See "crm cluster help geo_init" or "crm cluster geo_init --help"
+
+[[cmdhelp_cluster_geo_init_arbitrator,Initialize node as geo cluster arbitrator,From Code]]
+==== `geo-init-arbitrator`
+See "crm cluster help geo_init_arbitrator" or "crm cluster geo_init_arbitrator --help"
+
+[[cmdhelp_cluster_geo_join,Join cluster to existing geo cluster,From Code]]
+==== `geo-join`
+See "crm cluster help geo_join" or "crm cluster geo_join --help"
+
+[[cmdhelp_cluster_health,Cluster health check]]
+==== `health`
+
+Runs a larger set of tests and queries on all nodes in the cluster to
+verify the general system health and detect potential problems.
+
+Usage:
+...............
+health
+...............
+
+[[cmdhelp_cluster_init,Initializes a new HA cluster,From Code]]
+==== `init`
+See "crm cluster help init" or "crm cluster init --help"
+
+[[cmdhelp_cluster_join,Join existing cluster,From Code]]
+==== `join`
+See "crm cluster help join" or "crm cluster join --help"
+
+[[cmdhelp_cluster_remove,Remove node(s) from the cluster,From Code]]
+==== `remove`
+See "crm cluster help remove" or "crm cluster remove --help"
+
+[[cmdhelp_cluster_crash_test,Cluster crash test tool set,From Code]]
+==== `crash_test`
+See "crm cluster help crash_test" or "crm cluster crash_test --help"
+
+[[cmdhelp_cluster_restart,Restart cluster services]]
+==== `restart`
+
+Restarts the cluster-related system services on this node.
+
+Usage:
+.........
+restart
+.........
+
+[[cmdhelp_cluster_rename,Rename the cluster]]
+==== `rename`
+
+Rename the cluster.
+
+Usage:
+...............
+rename <new_cluster_name>
+...............
+
+
+[[cmdhelp_cluster_run,Execute an arbitrary command on all nodes/specific node]]
+==== `run`
+
+This command takes a shell statement as argument, executes that
+statement on all nodes in the cluster or a specific node,
+and reports the result.
+
+Usage:
+...............
+run <command> [node ...]
+...............
+
+Example:
+...............
+run "cat /proc/uptime"
+run "ls" node1 node2
+...............
+
+[[cmdhelp_cluster_start,Start cluster services]]
+==== `start`
+
+Starts the cluster-related system services on this node.
+
+Usage:
+.........
+start
+.........
+
+[[cmdhelp_cluster_status,Cluster status check]]
+==== `status`
+
+Reports the status for the cluster messaging layer on the local
+node.
+
+Usage:
+...............
+status
+...............
+
+[[cmdhelp_cluster_stop,Stop cluster services]]
+==== `stop`
+
+Stops the cluster-related system services on this node.
+
+Usage:
+.........
+stop
+.........
+
+[[cmdhelp_cluster_wait_for_startup,Wait for cluster to start]]
+==== `wait_for_startup`
+
+Mostly useful in scripts or automated workflows, this command will
+attempt to connect to the local cluster node repeatedly. The command
+will keep trying until the cluster node responds, or the `timeout`
+elapses. The timeout can be changed by supplying a value in seconds as
+an argument.
+
+Usage:
+........
+wait_for_startup [<timeout>]
+........
+
+[[cmdhelp_script,Cluster script management]]
+=== `script` - Cluster script management
+
+A big part of the configuration and management of a cluster is
+collecting information about all cluster nodes and deploying changes
+to those nodes. Often, just performing the same procedure on all
+nodes runs into problems, due to subtle differences in their
+configuration.
+
+For example, when configuring a cluster for the first time, the
+software needs to be installed and configured on all nodes before the
+cluster software can be launched and configured using `crmsh`. This
+process is cumbersome and error-prone, and the goal is for scripts to
+make this process easier.
+
+Scripts are implemented using the python `parallax` package which
+provides a thin wrapper on top of SSH. This allows the scripts to
+function through the usual SSH channels used for system maintenance,
+requiring no additional software to be installed or maintained.
+
+[[cmdhelp_script_json,JSON API for cluster scripts]]
+==== `json`
+
+This command provides a JSON API for the cluster scripts, intended for
+use in user interface tools that want to interact with the cluster via
+scripts.
+
+The command takes a single argument, which should be a JSON array with
+the first member identifying the command to perform.
+
+The output is line-based: Commands that return multiple results will
+return them line-by-line, ending with a terminator value: "end".
+
+When providing parameter values to this command, they should be
+provided as nested objects, so +virtual-ip:ip=192.168.0.5+ on the
+command line becomes the JSON object
++{"virtual-ip":{"ip":"192.168.0.5"}}+.
+
+API:
+........
+["list"]
+=> [{name, shortdesc, category}]
+
+["show", <name>]
+=> [{name, shortdesc, longdesc, category, <<steps>>}]
+
+<<steps>> := [{name, shortdesc, longdesc, required, parameters, steps}]
+
+<<params>> := [{name, shortdesc, longdesc, required, unique, advanced,
+ type, value, example}]
+
+["verify", <name>, <<values>>]
+=> [{shortdesc, longdesc, text, nodes}]
+
+["run", <name>, <<values>>]
+=> [{shortdesc, rc, output|error}]
+........
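+
+For example, a listing of all scripts could be requested from the
+system shell like this (note the quoting needed to protect the JSON
+array from the shell):
+........
+# crm script json '["list"]'
+........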
+
+
+[[cmdhelp_script_list,List available scripts]]
+==== `list`
+
+Lists the available scripts, sorted by category. Scripts that have the
+special `Script` category are hidden by default, since they are mainly
+used by other scripts or commands. To also show these, pass `all` as
+argument.
+
+To get a flat list of script names, not sorted by category, pass
+`names` as an extra argument.
+
+Usage:
+............
+list [all] [names]
+............
+
+Example:
+............
+list
+list all names
+............
+
+[[cmdhelp_script_run,Run the script]]
+==== `run`
+
+Given a list of parameter values, this command will execute the
+actions specified by the cluster script. The format for the parameter
+values is the same as for the `verify` command.
+
+The command optionally accepts two common parameters:
+
+* `nodes=<nodes>`: List of nodes that the script runs over
+* `dry_run=yes|no`: If set, the script will not perform any modifications.
+
+Additional parameters may be available depending on the script.
+
+Use the `show` command to see what parameters are available.
+
+Usage:
+.............
+run <script> [args...]
+.............
+
+Example:
+.............
+run apache install=true
+run sbd id=sbd-1 node=node1 sbd_device=/dev/disk/by-uuid/F00D-CAFE
+.............
+
+[[cmdhelp_script_show,Describe the script]]
+==== `show`
+
+Prints a description and short summary of the script, with
+descriptions of the accepted parameters.
+
+Advanced parameters are hidden by default. To show the complete list
+of parameters accepted by the script, pass `all` as argument.
+
+Usage:
+............
+show <script> [all]
+............
+
+Example:
+............
+show virtual-ip
+............
+
+[[cmdhelp_script_verify,Verify the script]]
+==== `verify`
+
+Checks the given parameter values, and returns a list
+of actions that will be executed when running the script
+if provided the same list of parameter values.
+
+Usage:
+............
+verify <script> [args...]
+............
+
+Example:
+............
+verify sbd id=sbd-1 node=node1 sbd_device=/dev/disk/by-uuid/F00D-CAFE
+............
+
+[[cmdhelp_corosync,Corosync management]]
+=== `corosync` - Corosync management
+
+Corosync is the underlying messaging layer for most HA clusters.
+This level provides commands for editing and managing the corosync
+configuration.
+
+[[cmdhelp_corosync_add-node,Add a corosync node]]
+==== `add-node`
+
+Adds a node to the corosync configuration. This is used with the `udpu`
+type configuration in corosync.
+
+A nodeid for the added node is generated automatically.
+
+Note that this command assumes that only a single ring is used, and
+sets only the address for ring0.
+
+Usage:
+.........
+add-node <addr> [name]
+.........
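+
+Example (address and name are illustrative):
+.........
+add-node 192.168.100.3 node-3
+.........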
+
+[[cmdhelp_corosync_del-node,Remove a corosync node]]
+==== `del-node`
+
+Removes a node from the corosync configuration. The argument given is
+the `ring0_addr` address set in the configuration file.
+
+Usage:
+.........
+del-node <addr>
+.........
+
+[[cmdhelp_corosync_diff,Diffs the corosync configuration]]
+==== `diff`
+
+Diffs the corosync configurations on different nodes. If no nodes are
+given as arguments, the corosync configurations on all nodes in the
+cluster are compared.
+
+`diff` takes an optional argument `--checksum`, to display a checksum
+for each file instead of calculating a diff.
+
+Usage:
+.........
+diff [--checksum] [node...]
+.........
+
+[[cmdhelp_corosync_edit,Edit the corosync configuration]]
+==== `edit`
+
+Opens the Corosync configuration file in an editor.
+
+Usage:
+.........
+edit
+.........
+
+[[cmdhelp_corosync_get,Get a corosync configuration value]]
+==== `get`
+
+Returns the value configured in `corosync.conf`, which is not
+necessarily the value used in the running configuration. See `reload`
+for telling corosync about configuration changes.
+
+The argument is the complete dot-separated path to the value.
+
+If there are multiple values configured with the same path, the
+command returns all values for that path. For example, to get all
+configured `ring0_addr` values, use this command:
+
+Example:
+........
+get nodelist.node.ring0_addr
+........
+
+[[cmdhelp_corosync_log,Show the corosync log file]]
+==== `log`
+
+Opens the log file specified in the corosync configuration file. If no
+log file is configured, this command returns an error.
+
+The pager used can be configured either using the PAGER
+environment variable or in `crm.conf`.
+
+Usage:
+.........
+log
+.........
+
+[[cmdhelp_corosync_pull,Pulls the corosync configuration]]
+==== `pull`
+
+Gets the corosync configuration from another node and copies
+it to this node.
+
+Usage:
+.........
+pull <node>
+.........
+
+[[cmdhelp_corosync_push,Push the corosync configuration]]
+==== `push`
+
+Pushes the corosync configuration file on this node to
+the list of nodes provided. If no target nodes are given,
+the configuration is pushed to all other nodes in the cluster.
+
+It is recommended to use `csync2` to distribute the cluster
+configuration files rather than relying on this command.
+
+Usage:
+.........
+push [node] ...
+.........
+
+Example:
+.........
+push node-2 node-3
+.........
+
+[[cmdhelp_corosync_reload,Reload the corosync configuration]]
+==== `reload`
+
+Tells all instances of corosync in this cluster to reload
+`corosync.conf`.
+
+After pushing a new configuration to all cluster nodes, call this
+command to make corosync use the new configuration.
+
+Usage:
+.........
+reload
+.........
+
+[[cmdhelp_corosync_set,Set a corosync configuration value]]
+==== `set`
+
+Sets the value identified by the given path. If the value does not
+exist in the configuration file, it will be added. However, if the
+section containing the value does not exist, the command will fail.
+
+Usage:
+.........
+set quorum.expected_votes 2
+.........
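+
+After changing a value, the configuration still has to be distributed
+and activated using the `push` and `reload` commands described in
+this section (the token value below is purely illustrative):
+.........
+set totem.token 5000
+push
+reload
+.........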
+
+[[cmdhelp_corosync_show,Display the corosync configuration]]
+==== `show`
+
+Displays the corosync configuration on the current node.
+
+.........
+show
+.........
+
+[[cmdhelp_corosync_status,Display the corosync status]]
+==== `status`
+
+Displays the corosync ring status by default. Can also display the
+quorum, qdevice, or qnetd status.
+
+Usage:
+.........
+status [ring|quorum|qdevice|qnetd]
+.........
+
+[[cmdhelp_cib,CIB shadow management]]
+=== `cib` - CIB shadow management
+
+This level is for management of shadow CIBs. It is available both
+at the top level and the `configure` level.
+
+All the commands are implemented using `cib_shadow(8)` and the
+`CIB_shadow` environment variable. The user prompt always
+includes the name of the currently active shadow or the live CIB.
+
+[[cmdhelp_cib_cibstatus,CIB status management and editing]]
+==== `cibstatus`
+
+Enter the CIB status section level, where the status section can be
+edited and managed. See the
+<<cmdhelp_cibstatus,CIB status management section>>.
+
+[[cmdhelp_cib_commit,copy a shadow CIB to the cluster]]
+==== `commit`
+
+Apply a shadow CIB to the cluster. If the shadow name is omitted
+then the current shadow CIB is applied.
+
+Temporary shadow CIBs are removed automatically on commit.
+
+Usage:
+...............
+commit [<cib>]
+...............
+
+[[cmdhelp_cib_delete,delete a shadow CIB]]
+==== `delete`
+
+Delete an existing shadow CIB.
+
+Usage:
+...............
+delete <cib>
+...............
+
+[[cmdhelp_cib_diff,diff between the shadow CIB and the live CIB]]
+==== `diff`
+
+Print differences between the current cluster configuration and
+the active shadow CIB.
+
+Usage:
+...............
+diff
+...............
+
+[[cmdhelp_cib_import,import a CIB or PE input file to a shadow]]
+==== `import`
+
+At times it may be useful to create a shadow file from the
+existing CIB. The CIB may be specified as a file or as a PE input
+file number. The shell will look up files in the local directory
+first and then in the PE directory (typically `/var/lib/pengine`).
+Once the CIB file is found, it is copied to a shadow and this
+shadow is immediately available for use at both the `configure` and
+`cibstatus` levels.
+
+If the shadow name is omitted then the target shadow is named
+after the input CIB file.
+
+Note that there is often more than one PE input file, so you may
+need to specify the full name.
+
+Usage:
+...............
+import {<file>|<number>} [<shadow>]
+...............
+Examples:
+...............
+import pe-warn-2222
+import 2289 issue2
+...............
+
+[[cmdhelp_cib_list,list all shadow CIBs]]
+==== `list`
+
+List existing shadow CIBs.
+
+Usage:
+...............
+list
+...............
+
+[[cmdhelp_cib_new,create a new shadow CIB]]
+==== `new`
+
+Create a new shadow CIB. The live cluster configuration and
+status are copied to the shadow CIB.
+
+If the name of the shadow is omitted, a temporary shadow CIB is
+created. This is useful if multiple level sessions are desired
+without affecting the cluster. A temporary shadow CIB is short-lived
+and will be removed either on `commit` or on program exit.
+Note that if the temporary shadow is not committed, all changes in
+the temporary shadow are lost.
+
+Specify `withstatus` if you want to edit the status section of
+the shadow CIB (see the <<cmdhelp_cibstatus,cibstatus section>>).
+Add `force` to force overwriting the existing shadow CIB.
+
+To start with an empty configuration that is not copied from the live
+CIB, specify the `empty` keyword. (This also allows a shadow CIB to be
+created in case no cluster is running.)
+
+Usage:
+...............
+new [<cib>] [withstatus] [force] [empty]
+...............
+
+[[cmdhelp_cib_reset,copy live cib to a shadow CIB]]
+==== `reset`
+
+Copy the current cluster configuration into the shadow CIB.
+
+Usage:
+...............
+reset <cib>
+...............
+
+[[cmdhelp_cib_use,change working CIB]]
+==== `use`
+
+Choose a CIB source. If you want to edit the status from the
+shadow CIB specify `withstatus` (see <<cmdhelp_cibstatus,`cibstatus`>>).
+Leave out the CIB name to switch to the running CIB.
+
+Usage:
+...............
+use [<cib>] [withstatus]
+...............
+
+[[cmdhelp_ra,Resource Agents (RA) lists and documentation]]
+=== `ra` - Resource Agents (RA) lists and documentation
+
+This level contains commands which show various information about
+the installed resource agents. It is available both at the top
+level and at the `configure` level.
+
+[[cmdhelp_ra_classes,list classes and providers]]
+==== `classes`
+
+Print all resource agents' classes and, where appropriate, a list
+of available providers.
+
+Usage:
+...............
+classes
+...............
+
+[[cmdhelp_ra_info,show meta data for a RA]]
+==== `info` (`meta`)
+
+Show the meta-data of a resource agent type. This is where users
+can find information on how to use a resource agent. It is also
+possible to get information from some programs: `pengine`,
+`crmd`, `cib`, and `stonithd`. Just specify the program name
+instead of an RA.
+
+Usage:
+...............
+info [<class>:[<provider>:]]<type>
+info <type> <class> [<provider>] (obsolete)
+...............
+Example:
+...............
+info apache
+info ocf:pacemaker:Dummy
+info stonith:ipmilan
+info pengine
+...............
+
+[[cmdhelp_ra_list,list RA for a class (and provider)]]
+==== `list`
+
+List available resource agents for the given class. If the class
+is `ocf`, supply a provider to get agents which are available
+only from that provider.
+
+Usage:
+...............
+list <class> [<provider>]
+...............
+Example:
+...............
+list ocf pacemaker
+...............
+
+[[cmdhelp_ra_providers,show providers for a RA and a class]]
+==== `providers`
+
+List providers for a resource agent type. The class parameter
+defaults to `ocf`.
+
+Usage:
+...............
+providers <type> [<class>]
+...............
+Example:
+...............
+providers apache
+...............
+
+[[cmdhelp_ra_validate,validate parameters for RA]]
+==== `validate`
+
+If the resource agent supports the `validate-all` action, this calls
+the action with the given parameters, printing any warnings or errors
+reported by the agent.
+
+Usage:
+................
+validate <agent> [<key>=<value> ...]
+................
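+Example (agent and parameters are illustrative):
+................
+validate ocf:heartbeat:IPaddr2 ip=192.168.1.101
+................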
+
+[[cmdhelp_resource,Resource management]]
+=== `resource` - Resource management
+
+At this level resources may be managed.
+
+All (or almost all) commands are implemented with the CRM tools
+such as `crm_resource(8)`.
+
+[[cmdhelp_resource_ban,ban a resource from a node]]
+==== `ban`
+
+Ban a resource from running on a certain node. If no node is given
+as argument, the resource is banned from the current location.
+
+See `move` for details on other arguments.
+
+Usage:
+...............
+ban <rsc> [<node>] [<lifetime>] [force]
+...............
+
+[[cmdhelp_resource_cleanup,cleanup resource status]]
+==== `cleanup`
+
+If a resource has any past failures, clear its history and fail
+count. This is typically done after the resource has temporarily
+failed.
+
+If a node is omitted, the cleanup is done on all nodes.
+
++(Pacemaker 1.1.14)+ Pass +force+ to clean up the resource itself;
+otherwise, the cleanup command will apply to the parent resource (if
+any).
+
+Usage:
+...............
+cleanup [<rsc>] [<node>] [force]
+...............
+
+[[cmdhelp_resource_clear,Clear any relocation constraint]]
+==== `clear` (`unmove`, `unmigrate`, `unban`)
+
+Remove any relocation constraint created by
+the `move`, `migrate` or `ban` command.
+
+Usage:
+...............
+clear <rsc>
+unmigrate <rsc>
+unban <rsc>
+...............
+
+[[cmdhelp_resource_constraints,Show constraints affecting a resource]]
+==== `constraints`
+
+Display the location and colocation constraints affecting the
+resource.
+
+Usage:
+................
+constraints <rsc>
+................
+
+[[cmdhelp_resource_demote,demote a master-slave resource]]
+==== `demote`
+
+Demote a master-slave resource using the `target-role`
+attribute.
+
+Usage:
+...............
+demote <rsc>
+...............
+
+[[cmdhelp_resource_failcount,manage failcounts]]
+==== `failcount`
+
+Show/edit/delete the failcount of a resource.
+When setting a non-zero value, `operation` and `interval` should be
+provided if multiple operation failcount entries exist.
+`interval` is a value in seconds.
+
+Usage:
+...............
+failcount <rsc> set <node> <value> [operation] [interval]
+failcount <rsc> delete <node>
+failcount <rsc> show <node>
+...............
+Example:
+...............
+failcount fs_0 delete node2
+...............
+
+[[cmdhelp_resource_locate,show the location of resources]]
+==== `locate`
+
+Show the current location of one or more resources.
+
+Usage:
+...............
+locate [<rsc> ...]
+...............
+
+[[cmdhelp_resource_maintenance,Enable/disable per-resource maintenance mode]]
+==== `maintenance`
+
+Enables or disables the per-resource maintenance mode. When this mode
+is enabled, no monitor operations will be triggered for the resource.
+The `maintenance` attribute conflicts with `is-managed`. When setting
+the `maintenance` attribute, the user is prompted to remove the
+`is-managed` attribute if it exists.
+
+Usage:
+..................
+maintenance <resource> [on|off|true|false]
+..................
+
+Example:
+..................
+maintenance rsc1
+maintenance rsc2 off
+..................
+
+[[cmdhelp_resource_manage,put a resource into managed mode]]
+==== `manage`
+
+Manage a resource using the `is-managed` attribute. If there
+are multiple meta attribute sets, the attribute is set in all of
+them. If the resource is a clone, all `is-managed` attributes are
+removed from the child resources.
+The `is-managed` attribute conflicts with `maintenance`. When setting
+the `is-managed` attribute, the user is prompted to remove the
+`maintenance` attribute if it exists.
+
+For details on group management see <<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+manage <rsc>
+...............
+
+[[cmdhelp_resource_meta,manage a meta attribute]]
+==== `meta`
+
+Show/edit/delete a meta attribute of a resource. Currently, all
+meta attributes of a resource may be managed with other commands
+such as `resource stop`.
+
+Usage:
+...............
+meta <rsc> set <attr> <value>
+meta <rsc> delete <attr>
+meta <rsc> show <attr>
+...............
+Example:
+...............
+meta ip_0 set target-role stopped
+...............
+
+[[cmdhelp_resource_move,Move a resource to another node]]
+==== `move` (`migrate`)
+
+Move a resource away from its current location.
+
+If the destination node is left out, the resource is migrated by
+creating a constraint which prevents it from running on the current
+node. For this type of constraint to be created, the +force+ argument
+is required.
+
+A lifetime may be given for the constraint. Once it expires, the
+location constraint will no longer be active.
+
+Usage:
+...............
+move <rsc> [<node>] [<lifetime>] [force]
+...............
+
+[[cmdhelp_resource_operations,Show active resource operations]]
+==== `operations`
+
+Show active operations, optionally filtered by resource and node.
+
+Usage:
+................
+operations [<rsc>] [<node>]
+................
+
+[[cmdhelp_resource_param,manage a parameter of a resource]]
+==== `param`
+
+Show/edit/delete a parameter of a resource.
+
+Usage:
+...............
+param <rsc> set <param> <value>
+param <rsc> delete <param>
+param <rsc> show <param>
+...............
+Example:
+...............
+param ip_0 show ip
+...............
+
+[[cmdhelp_resource_promote,promote a master-slave resource]]
+==== `promote`
+
+Promote a master-slave resource using the `target-role`
+attribute.
+
+Usage:
+...............
+promote <rsc>
+...............
+
+[[cmdhelp_resource_refresh,Recheck current resource status and drop failure history]]
+==== `refresh`
+
+Delete the resource's history (including failures) so that its current state is rechecked.
+
+Usage:
+...............
+refresh [<rsc>] [<node>] [force]
+...............
+
+[[cmdhelp_resource_restart,restart resources]]
+==== `restart`
+
+Restart one or more resources. This is essentially a shortcut for
+a resource stop followed by a start. The shell first waits for the
+stop to finish, that is, for all resources to really stop, and only
+then orders the start action. Because this command entails a whole
+set of operations, informational messages are printed to let the
+user see some progress.
+
+For details on group management see
+<<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+restart <rsc> [<rsc> ...]
+...............
+Example:
+...............
+# crm resource restart g_webserver
+INFO: ordering g_webserver to stop
+waiting for stop to finish .... done
+INFO: ordering g_webserver to start
+#
+...............
+
+[[cmdhelp_resource_scores,Display resource scores]]
+==== `scores`
+
+Display the allocation scores for all resources.
+
+Usage:
+................
+scores
+................
+
+[[cmdhelp_resource_secret,manage sensitive parameters]]
+==== `secret`
+
+Sensitive parameters can be kept in local files rather than in the
+CIB in order to prevent accidental data exposure. Use the `secret`
+command to manage such parameters. `stash` and `unstash` move the
+value from the CIB to a local file and back to the CIB,
+respectively. The `set` subcommand sets the parameter to the
+provided value. `delete` removes the parameter completely. `show`
+displays the value of the parameter from the local file. Use `check`
+to verify whether the local file content is valid.
+
+Usage:
+...............
+secret <rsc> set <param> <value>
+secret <rsc> stash <param>
+secret <rsc> unstash <param>
+secret <rsc> delete <param>
+secret <rsc> show <param>
+secret <rsc> check <param>
+...............
+Example:
+...............
+secret fence_1 show password
+secret fence_1 stash password
+secret fence_1 set password secret_value
+...............
+
+[[cmdhelp_resource_start,start resources]]
+==== `start`
+
+Start one or more resources by setting the `target-role` attribute. If
+there are multiple meta attribute sets, the attribute is set in all
+of them. If the resource is a clone, all `target-role` attributes are
+removed from the child resources.
+
+For details on group management see
+<<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+start <rsc> [<rsc> ...]
+...............
+
+[[cmdhelp_resource_status,show status of resources]]
+==== `status` (`show`, `list`)
+
+Print resource status. More than one resource can be shown at once. If
+the resource parameter is left out, the status of all resources is
+printed.
+
+Usage:
+...............
+status [<rsc> ...]
+...............
+
+[[cmdhelp_resource_stop,stop resources]]
+==== `stop`
+
+Stop one or more resources using the `target-role` attribute. If there
+are multiple meta attribute sets, the attribute is set in all of
+them. If the resource is a clone, all `target-role` attributes are
+removed from the child resources.
+
+For details on group management see
+<<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+stop <rsc> [<rsc> ...]
+...............
+
+[[cmdhelp_resource_trace,start RA tracing]]
+==== `trace`
+
+Start tracing RA for the given operation. The trace files are
+stored in `$HA_VARLIB/trace_ra`. If the operation to be traced is
+monitor, note that the number of trace files can grow very
+quickly.
+
+If no operation name is given, crmsh will attempt to trace all
+operations for the RA. This includes any configured operations, start
+and stop as well as promote/demote for multistate resources.
+
+To trace the probe operation which exists for all resources, either
+set a trace for `monitor` with interval `0`, or use `probe` as the
+operation name.
+
+Note: RA tracing is only supported by OCF resource agents. The
+pacemaker-execd daemon does not log recurring monitor operations
+unless an error occurred.
+
+Usage:
+...............
+trace <rsc> [<op> [<interval>] ]
+...............
+Example:
+...............
+trace fs start
+trace webserver
+trace webserver probe
+trace fs monitor 0
+...............
+
+[[cmdhelp_resource_unmanage,put a resource into unmanaged mode]]
+==== `unmanage`
+
+Unmanage a resource using the `is-managed` attribute. If there
+are multiple meta attribute sets, the attribute is set in all of
+them. If the resource is a clone, all `is-managed` attributes are
+removed from the child resources.
+
+For details on group management see <<cmdhelp_options_manage-children,`options manage-children`>>.
+
+Usage:
+...............
+unmanage <rsc>
+...............
+
+[[cmdhelp_resource_untrace,stop RA tracing]]
+==== `untrace`
+
+Stop tracing the RA for the given operation. If no operation name is
+given, crmsh will attempt to stop tracing all operations of the resource.
+
+Usage:
+...............
+untrace <rsc> [<op> [<interval>] ]
+...............
+Example:
+...............
+untrace fs start
+untrace webserver
+...............
+
+[[cmdhelp_resource_utilization,manage a utilization attribute]]
+==== `utilization`
+
+Show/edit/delete a utilization attribute of a resource. These
+attributes describe hardware requirements. By setting the
+`placement-strategy` cluster property appropriately, it is
+then possible to distribute resources based on resource
+requirements and node size. See also <<cmdhelp_node_utilization,node utilization attributes>>.
+
+Usage:
+...............
+utilization <rsc> set <attr> <value>
+utilization <rsc> delete <attr>
+utilization <rsc> show <attr>
+...............
+Example:
+...............
+utilization xen1 set memory 4096
+...............
+
+[[cmdhelp_node,Node management]]
+=== `node` - Node management
+
+Node management and status commands.
+
+[[cmdhelp_node_attribute,manage attributes]]
+==== `attribute`
+
+Edit node attributes. This kind of attribute should refer to
+relatively static properties, such as memory size.
+
+Usage:
+...............
+attribute <node> set <attr> <value>
+attribute <node> delete <attr>
+attribute <node> show <attr>
+...............
+Example:
+...............
+attribute node_1 set memory_size 4096
+...............
+
+[[cmdhelp_node_clearstate,Clear node state]]
+==== `clearstate`
+
+Resets and clears the state of the specified node. The node is
+afterwards assumed to be clean and offline. This command can be used
+to manually confirm that a node has been fenced (e.g., powered off).
+
+Be careful! This can cause data corruption if you confirm that a node
+is down which is, in fact, not cleanly down: the cluster will proceed
+as if the fence had succeeded, possibly starting resources multiple
+times.
+
+Usage:
+...............
+clearstate <node>
+...............
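+Example (the node name is illustrative):
+...............
+clearstate node-1
+...............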
+
+[[cmdhelp_node_delete,delete node]]
+==== `delete`
+
+Delete a node. This command will remove the node from the CIB
+and, in case the cluster stack is running, use the appropriate
+program (`crm_node` or `hb_delnode`) to remove the node from the
+membership.
+
+If the node is still listed as active and a member of our
+partition, we refuse to remove it. With the global force option
+(`-F`), we will try to delete the node anyway.
+
+Usage:
+...............
+delete <node>
+...............
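+Example (the node name is illustrative):
+...............
+delete node-1
+...............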
+
+[[cmdhelp_node_fence,fence node]]
+==== `fence`
+
+Make CRM fence a node. This functionality depends on stonith
+resources capable of fencing the specified node. If no such
+stonith resources exist, no fencing will happen.
+
+Usage:
+...............
+fence <node>
+...............
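+Example (the node name is illustrative):
+...............
+fence node-1
+...............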
+
+[[cmdhelp_node_maintenance,put node into maintenance mode]]
+==== `maintenance`
+
+Set the node status to maintenance. This is equivalent to the
+cluster-wide `maintenance-mode` property but puts just one node
+into maintenance mode. If there are resources in maintenance mode
+on the node, the user will be offered the option to remove the
+maintenance property from them.
+
+The node parameter defaults to the node where the command is run.
+
+Usage:
+...............
+maintenance [<node>]
+...............
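+Example (the node name is illustrative):
+...............
+maintenance node-1
+...............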
+
+[[cmdhelp_node_online,set node online]]
+==== `online`
+
+Set a node to online status.
+
+The node parameter defaults to the node where the command is run.
+
+Usage:
+...............
+online [<node>]
+...............
+
+[[cmdhelp_node_ready,put node into ready mode]]
+==== `ready`
+
+Set the node's maintenance status to `off`. The node should now
+be fully operational again and capable of running resource
+operations.
+
+The node parameter defaults to the node where the command is run.
+
+Usage:
+...............
+ready [<node>]
+...............
+
+[[cmdhelp_node_server,show node hostname or server address]]
+==== `server`
+
+Remote nodes may have a configured server address which should
+be used when contacting the node. This command prints the
+server address if configured, else the node name.
+
+If no parameter is given, the addresses or names for all nodes
+are printed.
+
+Usage:
+...............
+server [<node> ...]
+...............
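+Example (node names are illustrative):
+...............
+server
+server node-1 node-2
+...............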
+
+[[cmdhelp_node_show,show node]]
+==== `show`
+
+Show a node definition. If the node parameter is omitted then all
+nodes are shown.
+
+Usage:
+...............
+show [<node>]
+...............
+
+[[cmdhelp_node_standby,put node into standby]]
+==== `standby`
+
+Set a node to standby status. The node parameter defaults to the
+node where the command is run.
+
+Additionally, you may specify a lifetime for the standby: if set to
+`reboot`, the node will be back online once it reboots. `forever` will
+keep the node in standby after reboot. The lifetime defaults to
+`forever`.
+
+Usage:
+...............
+standby [<node>] [<lifetime>]
+
+lifetime :: reboot | forever
+...............
+
+Example:
+...............
+standby bob reboot
+...............
+
+[[cmdhelp_node_status-attr,manage status attributes]]
+==== `status-attr`
+
+Edit node attributes which are in the CIB status section, i.e.
+attributes which hold properties of a more volatile nature. One
+typical example is the attribute generated by the `pingd` utility.
+
+Usage:
+...............
+status-attr <node> set <attr> <value>
+status-attr <node> delete <attr>
+status-attr <node> show <attr>
+...............
+Example:
+...............
+status-attr node_1 show pingd
+...............
+
+[[cmdhelp_node_utilization,manage utilization attributes]]
+==== `utilization`
+
+Edit node utilization attributes. These attributes describe
+hardware characteristics as integer numbers such as memory size
+or the number of CPUs. By setting the `placement-strategy`
+cluster property appropriately, it is then possible to distribute
+resources based on resource requirements and node size. See also
+<<cmdhelp_resource_utilization,resource utilization attributes>>.
+
+Usage:
+...............
+utilization <node> set <attr> <value>
+utilization <node> delete <attr>
+utilization <node> show <attr>
+...............
+Examples:
+...............
+utilization node_1 set memory 16384
+utilization node_1 show cpu
+...............
+
+[[cmdhelp_site,GEO clustering site support]]
+=== `site` - GEO clustering site support
+
+A cluster may consist of two or more subclusters in different and
+distant locations. This set of commands supports such setups.
+
+[[cmdhelp_site_ticket,manage site tickets]]
+==== `ticket`
+
+Tickets are cluster-wide attributes. They can be managed at the
+site where this command is executed.
+
+It is then possible to constrain resources depending on the
+ticket availability (see the <<cmdhelp_configure_rsc_ticket,`rsc_ticket`>> command
+for more details).
+
+Usage:
+...............
+ticket {grant|revoke|standby|activate|show|time|delete} <ticket>
+...............
+Example:
+...............
+ticket grant ticket1
+...............
+
+[[cmdhelp_options,User preferences]]
+=== `options` - User preferences
+
+The user may set various options for the crm shell itself.
+
+[[cmdhelp_options_add-quotes,add quotes around parameters containing spaces]]
+==== `add-quotes`
+
+The shell (as in `/bin/sh`) parser strips quotes from the command
+line. This may sometimes make it difficult to type values which
+contain white space. One typical example is the configure
+`filter` command. The crm shell will supply extra quotes around
+arguments which contain white space. The default is `yes`.
+
+.Note on quotes use
+****************************
+Automatic quoting of arguments was introduced in version 1.2.2
+and is technically a regression. Being a regression is the only
+reason the `add-quotes` option exists. If you have custom shell
+scripts which would break, just set the `add-quotes` option to
+`no`.
+
+For instance, with adding quotes enabled, it is possible to do
+the following:
+...............
+# crm configure primitive d1 Dummy \
+ meta description="some description here"
+# crm configure filter 'sed "s/hostlist=./&node-c /"' fencing
+...............
+****************************
+
+[[cmdhelp_options_check-frequency,when to perform semantic check]]
+==== `check-frequency`
+
+A semantic check of the CIB, or of the elements modified or created,
+may be performed on every configuration change (`always`), when
+verifying (`on-verify`), or `never`. The default is `always`.
+Experts may want to change the setting to `on-verify`.
+
+The checks require that resource agents are present. If they are
+not installed at configuration time, set this preference to
+`never`.
+
+See <<topics_Features_Checks,Configuration semantic checks>> for more details.
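+
+Usage:
+...............
+check-frequency {always|on-verify|never}
+...............
+Example:
+...............
+check-frequency on-verify
+...............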
+
+[[cmdhelp_options_check-mode,how to treat semantic errors]]
+==== `check-mode`
+
+A semantic check of the CIB, or of the elements modified or created,
+may be performed in `strict` or `relaxed` mode. In `strict` mode,
+certain problems are treated as configuration errors; in `relaxed`
+mode, all of them are treated as warnings. The default is `strict`.
+
+See <<topics_Features_Checks,Configuration semantic checks>> for more details.
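+
+Usage:
+...............
+check-mode {strict|relaxed}
+...............
+Example:
+...............
+check-mode relaxed
+...............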
+
+[[cmdhelp_options_colorscheme,set colors for output]]
+==== `colorscheme`
+
+With `output` set to `color`, a comma-separated list of colors
+from this option is used to emphasize:
+
+- keywords
+- object ids
+- attribute names
+- attribute values
+- scores
+- resource references
+
+`crm` can show colors only if curses support for Python is
+installed (usually provided by the `python-curses` package). The
+colors are whatever is available in your terminal. Use `normal`
+if you want to keep the default foreground color.
+
+This user preference defaults to
+`yellow,normal,cyan,red,green,magenta`, which is good for
+terminals with a dark background. You may want to change the
+color scheme and save it in the preferences file for other color
+setups.
+
+Example:
+...............
+colorscheme yellow,normal,blue,red,green,magenta
+...............
+
+[[cmdhelp_options_editor,set preferred editor program]]
+==== `editor`
+
+The `edit` command invokes an editor. Use this to specify your
+preferred editor program. If not set, it will default to either
+the value of the `EDITOR` environment variable or to one of the
+standard UNIX editors (`vi`,`emacs`,`nano`).
+
+Usage:
+...............
+editor program
+...............
+Example:
+...............
+editor vim
+...............
+
+[[cmdhelp_options_manage-children,how to handle children resource attributes]]
+==== `manage-children`
+
+Some resource management commands, such as `resource stop`, may
+not always produce the desired result when the target resource is
+a group. Each element, the group and its primitive members, can
+have a meta attribute, and those attributes may end up with
+conflicting values. Consider the following construct:
+...............
+crm(live)# configure show svc fs virtual-ip
+primitive fs Filesystem \
+ params device="/dev/drbd0" directory="/srv/nfs" fstype=ext3 \
+ op monitor interval=10s \
+ meta target-role=Started
+primitive virtual-ip IPaddr2 \
+ params ip=10.2.13.110 iflabel=1 \
+ op monitor interval=10s \
+ op start interval=0 \
+ meta target-role=Started
+group svc fs virtual-ip \
+ meta target-role=Stopped
+...............
+
+Even though the element +svc+ should be stopped, the group is
+actually running because all its members have the +target-role+
+set to +Started+:
+...............
+crm(live)# resource show svc
+resource svc is running on: xen-f
+...............
+
+Hence, if the user invokes +resource stop svc+, the intention is
+not clear. This preference gives the user an opportunity to
+better control what happens if attributes of group members have
+values which are in conflict with the same attribute of the group
+itself.
+
+Possible values are +ask+ (the default), +always+, and +never+.
+If set to +always+, the crm shell removes all children attributes
+which have values different from the parent. If set to +never+,
+all children attributes are left intact. Finally, if set to
++ask+, the user will be asked for each member what is to be done.
+
+[[cmdhelp_options_output,set output type]]
+==== `output`
+
+`crm` can adorn configurations in two ways: in color (similar to,
+for instance, the `ls --color` command) and by showing keywords in
+upper case. Possible values are `plain`, `color-always`, `color`,
+and `uppercase`. It is possible to combine `uppercase` with one
+of the color values in order to get an upper case Christmas tree.
+Just set this option to `color,uppercase` or `color-always,uppercase`.
+If you need color codes in pipes, `color-always` forces color
+codes even when the terminal is not a tty (just like `ls
+--color=always`).
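+
+Example:
+...............
+output color,uppercase
+...............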
+
+[[cmdhelp_options_pager,set preferred pager program]]
+==== `pager`
+
+The `view` command displays text through a pager. Use this to
+specify your preferred pager program. If not set, it will default
+to either the value of the `PAGER` environment variable or to one
+of the standard UNIX system pagers (`less`,`more`,`pg`).
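+
+Example (any pager on your +$PATH+ works; `less` is illustrative):
+...............
+pager less
+...............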
+
+[[cmdhelp_options_reset,reset user preferences to factory defaults]]
+==== `reset`
+
+This command resets all user options to the defaults. If used as
+a single-shot command, the rc file (+$HOME/.config/crm/rc+) is
+reset to the defaults too.
+
+[[cmdhelp_options_save,save the user preferences to the rc file]]
+==== `save`
+
+Save current settings to the rc file (+$HOME/.config/crm/rc+). On
+further `crm` runs, the rc file is automatically read and parsed.
+
+[[cmdhelp_options_set,Set the value of a given option]]
+==== `set`
+
+Sets the value of an option. Takes the fully qualified
+name of the option as argument, as displayed by +show all+.
+
+The modified option value is stored in the user-local
+configuration file, usually found in +~/.config/crm/crm.conf+.
+
+Usage:
+........
+set <option> <value>
+........
+
+Example:
+........
+set color.warn "magenta bold"
+set editor nano
+........
+
+[[cmdhelp_options_show,show current user preference]]
+==== `show`
+
+Display all current settings.
+
+Given an option name as argument, `show` will display only the value
+of that option.
+
+Given +all+ as argument, `show` displays all available user options.
+
+Usage:
+........
+show [all|<option>]
+........
+
+Example:
+........
+show
+show skill-level
+show all
+........
+
+[[cmdhelp_options_skill-level,set skill level]]
+==== `skill-level`
+
+Based on the skill-level setting, the user is allowed to use only
+a subset of commands. There are three levels: operator,
+administrator, and expert. The operator level allows only
+commands at the `resource` and `node` levels, but not editing
+or deleting resources. The administrator may do that, and may also
+configure the cluster at the `configure` level and manage the
+shadow CIBs. The expert may do everything.
+
+Usage:
+...............
+skill-level <level>
+
+level :: operator | administrator | expert
+...............
+
+.Note on security
+****************************
+The `skill-level` option is advisory only. There is nothing
+stopping any user from changing their skill level (see
+<<topics_Features_Security,Access Control Lists (ACL)>> on how to enforce
+access control).
+****************************
+
+[[cmdhelp_options_sort-elements,sort CIB elements]]
+==== `sort-elements`
+
+`crm` by default sorts CIB elements. If you want them to appear in
+the order they were created, set this option to `no`.
+
+Usage:
+...............
+sort-elements {yes|no}
+...............
+Example:
+...............
+sort-elements no
+...............
+
+[[cmdhelp_options_user,set the cluster user]]
+==== `user`
+
+Sufficient privileges are necessary in order to manage a
+cluster: programs such as `crm_verify` or `crm_resource` and,
+ultimately, `cibadmin` have to be run either as `root` or as the
+CRM owner user (typically `hacluster`). You don't have to worry
+about that if you run `crm` as `root`. A more secure way is to
+run the program with your usual privileges, set this option to
+the appropriate user (such as `hacluster`), and set up the
+`sudoers` file.
+
+Usage:
+...............
+user system-user
+...............
+Example:
+...............
+user hacluster
+...............
+
+[[cmdhelp_options_wait,synchronous operation]]
+==== `wait`
+
+In normal operation, `crm` runs a command and returns
+immediately to process other commands or get input from the user.
+With this option set to `yes`, it will wait for the started
+transition to finish. In interactive mode, dots are printed to
+indicate progress.
+
+Usage:
+...............
+wait {yes|no}
+...............
+Example:
+...............
+wait yes
+...............
+
+[[cmdhelp_configure,CIB configuration]]
+=== `configure` - CIB configuration
+
+This level enables all CIB object definition commands.
+
+The configuration may be logically divided into four parts:
+nodes, resources, constraints, and (cluster) properties and
+attributes. Each of these commands supports one or more basic CIB
+objects.
+
+Nodes and attributes describing nodes are managed using the
+`node` command.
+
+Commands for resources are:
+
+- `primitive`
+- `monitor`
+- `group`
+- `clone`
+- `ms`/`master` (master-slave)
+
+In order to streamline large configurations, it is possible to
+define a template which can later be referenced in primitives:
+
+- `rsc_template`
+
+In that case the primitive inherits all attributes defined in the
+template.
+
+There are three types of constraints:
+
+- `location`
+- `colocation`
+- `order`
+
+It is possible to define fencing order (stonith resource
+priorities):
+
+- `fencing_topology`
+
+Finally, there are the cluster properties, resource meta
+attributes defaults, and operations defaults. All are just a set
+of attributes. These attributes are managed by the following
+commands:
+
+- `property`
+- `rsc_defaults`
+- `op_defaults`
+
+In addition to the cluster configuration, Access Control
+Lists (ACL) can be set up to allow access to parts of the CIB for
+users other than +root+ and +hacluster+. The following commands
+manage ACLs:
+
+- `user`
+- `role`
+
+In Pacemaker 1.1.12 and up, this command replaces the `user` command
+for handling ACLs:
+
+- `acl_target`
+
+The changes are applied to the current CIB only on ending the
+configuration session or using the `commit` command.
+
+Comments are lines starting with +#+. Comments are tied
+to the element which follows. If the element moves, its comments
+will follow.
+
+[[cmdhelp_configure_acl_target,Define target access rights]]
+==== `acl_target`
+
+Defines an ACL target.
+
+Usage:
+................
+acl_target <tid> [<role> ...]
+................
+Example:
+................
+acl_target joe resource_admin constraint_editor
+................
+
+[[cmdhelp_configure_alert,Event-driven alerts]]
+==== `alert`
+
+.Version note
+****************************
+This feature is only available
+in Pacemaker 1.1.15+.
+****************************
+
+Event-driven alerts enable calling scripts whenever interesting
+events occur in the cluster (nodes joining or leaving, resources
+starting or stopping, etc.).
+
+The +path+ is an arbitrary file path to an alert script. Existing
+external scripts used with ClusterMon resources can be used as alert
+scripts, since the interface is compatible.
+
+Each alert may have a number of recipients configured. These will be
+passed to the script as arguments. The first recipient will also be
+passed as the +CRM_alert_recipient+ environment variable, for
+compatibility with existing scripts that only support one recipient.
+
+The available meta attributes are +timeout+ (default 30s) and
++timestamp-format+ (default `"%H:%M:%S.%06N"`).
+
+Some configurations may require each recipient to be delimited by
+brackets, to avoid ambiguity. In the example +alert-2+ below, the meta
+attribute for `timeout` is defined after the recipient, so the
+brackets are used to ensure that the meta attribute is set for the
+alert and not just the recipient. This can be avoided by setting any
+alert attributes before defining the recipients.
+
+Usage:
+...............
+alert <id> <path> \
+ [attributes <nvpair> ...] \
+ [meta <nvpair> ...] \
+ [select [nodes | fencing | resources | attributes '{' <attribute> ... '}' ] ...] \
+ [to [{] <recipient>
+ [attributes <nvpair> ...] \
+ [meta <nvpair> ...] [}] \
+ ...]
+...............
+
+Example:
+...............
+alert alert-1 /srv/pacemaker/pcmk_alert_sample.sh \
+ to /var/log/cluster-alerts.log
+
+alert alert-2 /srv/pacemaker/example_alert.sh \
+ meta timeout=60s \
+ to { /var/log/cluster-alerts.log }
+
+alert alert-3 /srv/pacemaker/example_alert.sh \
+ select fencing \
+ to { /var/log/fencing-alerts.log }
+
+...............
+
+[[cmdhelp_configure_bundle,Container bundle]]
+==== `bundle`
+
+A bundle is a single resource specifying the settings, networking
+requirements, and storage requirements for any number of containers
+generated from the same container image.
+
+Pacemaker bundles support Docker (since version 1.1.17) and rkt (since
+version 1.1.18) container technologies.
+
+A bundle must contain exactly one +docker+ or +rkt+ element.
+
+The bundle definition may contain a reference to a primitive
+resource which defines the service running inside the
+container.
+
+Example:
+...............
+
+primitive httpd-apache ocf:heartbeat:apache
+
+bundle httpd \
+ docker image=pcmk:httpd replicas=3 \
+ network ip-range-start=10.10.10.123 host-netmask=24 \
+ port-mapping port=80 \
+ storage \
+ storage-mapping target-dir=/var/www/html source-dir=/srv/www options=rw \
+ primitive httpd-apache
+
+...............
+
+[[cmdhelp_configure_cib,CIB shadow management]]
+==== `cib`
+
+This level is for management of shadow CIBs. It is available at
+the `configure` level to enable saving intermediate changes to a
+shadow CIB instead of to the live cluster. This short excerpt
+shows how:
+...............
+crm(live)configure# cib new test-2
+INFO: test-2 shadow CIB created
+crm(test-2)configure# commit
+...............
+Note how the current CIB in the prompt changed from +live+ to
++test-2+ after issuing the `cib new` command. See also
+<<cmdhelp_cib,CIB shadow management>> for more information.
+
+[[cmdhelp_configure_cibstatus,CIB status management and editing]]
+==== `cibstatus`
+
+Enter the level for editing and managing the CIB status section. See
+the <<cmdhelp_cibstatus,CIB status management section>>.
+
+[[cmdhelp_configure_clone,define a clone]]
+==== `clone`
+
+The `clone` command creates a resource clone. It may contain a
+single primitive resource or one group of resources.
+
+Usage:
+...............
+clone <name> <rsc>
+ [description=<description>]
+ [meta <attr_list>]
+ [params <attr_list>]
+
+attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+...............
+Example:
+...............
+clone cl_fence apc_1 \
+ meta clone-node-max=1 globally-unique=false
+
+clone disk1 drbd1 \
+ meta promotable=true notify=true globally-unique=false
+...............
+
+[[cmdhelp_configure_colocation,colocate resources]]
+==== `colocation` (`collocation`)
+
+This constraint expresses the placement relation between two
+or more resources. If there are more than two resources, then the
+constraint is called a resource set.
+
+The score is used to indicate the priority of the constraint. A
+positive score indicates that the resources should run on the
+same node. A negative score indicates that they should not run on
+the same node. Values of positive or negative +infinity+ indicate
+a mandatory constraint.
+
+In the two-resource form, the cluster will place +<with-rsc>+ first,
+and then decide where to put the +<rsc>+ resource.
+
+Collocation resource sets have an extra attribute (+sequential+)
+to allow for sets of resources which don't depend on each other
+in terms of state. The shell syntax for such sets is to put
+resources in parentheses.
+
+Sets cannot be nested.
+
+The optional +node-attribute+ can be used to colocate resources on a
+set of nodes and not necessarily on the same node. For example, by
+setting a node attribute +color+ on all nodes and setting the
++node-attribute+ value to +color+ as well, the colocated resources
+will be placed on any node that has the same color.
+
+For more details on how to configure resource sets, see
+<<topics_Features_Resourcesets,`Syntax: Resource sets`>>.
+
+Usage:
+...............
+colocation <id> <score>: <rsc>[:<role>] <with-rsc>[:<role>]
+ [node-attribute=<node_attr>]
+
+colocation <id> <score>: <resource_sets>
+ [node-attribute=<node_attr>]
+
+resource_sets :: <resource_set> [<resource_set> ...]
+
+resource_set :: ["("|"["] <rsc>[:<role>] [<rsc>[:<role>] ...] \
+ [<attributes>] [")"|"]"]
+
+attributes :: [require-all=(true|false)] [sequential=(true|false)]
+
+...............
+Example:
+...............
+colocation never_put_apache_with_dummy -inf: apache dummy
+colocation c1 inf: A ( B C )
+...............
+
+[[cmdhelp_configure_commit,commit the changes to the CIB]]
+==== `commit`
+
+Commit the current configuration to the CIB in use. As noted
+elsewhere, commands in a configure session don't have immediate
+effect on the CIB. All changes are applied at one point in time,
+either using `commit` or when the user leaves the configure
+level. In case the CIB in use changed in the meantime, presumably
+by somebody else, the crm shell will refuse to apply the changes.
+
+If you know that it's fine to still apply them, add +force+ to the
+command line.
+
+To disable CIB patching and apply the changes by replacing the CIB
+completely, add +replace+ to the command line. Note that this can lead
+to previous changes being overwritten if some other process
+concurrently modifies the CIB.
+
+Usage:
+...............
+commit [force] [replace]
+...............
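+Example:
+...............
+commit
+commit force
+...............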
+
+[[cmdhelp_configure_default-timeouts,set timeouts for operations to minimums from the meta-data]]
+==== `default-timeouts`
+
+This command takes the timeouts from the actions section of the
+resource agent meta-data and sets them for the operations of the
+primitive.
+
+Usage:
+...............
+default-timeouts <id> [<id>...]
+...............
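+Example (the resource id is illustrative):
+...............
+default-timeouts www8
+...............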
+
+.Note on `default-timeouts`
+****************************
+The use of this command is discouraged in favor of manually
+determining the best timeouts required for the particular
+configuration. Relying on the resource agent to supply appropriate
+timeouts can cause the resource to fail at the worst possible moment.
+
+Appropriate timeouts for resource actions are context-sensitive, and
+should be carefully considered with the whole configuration in mind.
+****************************
+
+[[cmdhelp_configure_delete,delete CIB objects]]
+==== `delete`
+
+Delete one or more objects. If an object to be deleted belongs to
+a container object, such as a group, and it is the only resource
+in that container, then the container is deleted as well. Any
+related constraints are also removed.
+
+If the object is a started resource, it will not be deleted unless the
++--force+ flag is passed to the command, or the +force+ option is set.
+
+Usage:
+...............
+delete [--force] <id> [<id>...]
+...............
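+Example (the object ids are illustrative):
+...............
+delete www8
+delete --force d1
+...............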
+
+[[cmdhelp_configure_edit,edit CIB objects]]
+==== `edit`
+
+This command invokes the editor with the object description. As
+with the `show` command, the user may choose to edit all objects
+or a set of objects.
+
+If the user insists, he or she may edit the XML representation of
+the object. If you do that, don't modify any id attributes.
+
+Usage:
+...............
+edit [xml] [<id> ...]
+edit [xml] changed
+...............
+
+.Note on renaming element ids
+****************************
+The edit command sometimes cannot properly handle modifying
+element ids, in particular for elements which belong to group or
+ms resources. Group and ms resources themselves also cannot be
+renamed. Please use the `rename` command instead.
+****************************
+
+[[cmdhelp_configure_erase,erase the CIB]]
+==== `erase`
+
+The `erase` command clears the entire configuration, apart from
+nodes. To remove nodes as well, you have to specify the additional
+keyword `nodes`.
+
+Note that removing nodes from the live cluster may have some
+strange/interesting/unwelcome effects.
+
+Usage:
+...............
+erase [nodes]
+...............
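+Example:
+...............
+erase
+erase nodes
+...............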
+
+[[cmdhelp_configure_fencing_topology,node fencing order]]
+==== `fencing_topology`
+
+If multiple fencing (stonith) devices capable of fencing a node
+are available, their order may be specified by +fencing_topology+.
+The order is specified per node.
+
+Stonith resources can be separated by +,+ in which case all of
+them need to succeed. If they fail, the next stonith resource (or
+set of resources) is used. In other words, use a comma to separate
+resources which all need to succeed and whitespace for serial
+order. Whitespace around the comma is not allowed.
+
+If the node is left out, the order is used for all nodes.
+That should reduce the configuration size in some stonith setups.
+
+From Pacemaker version 1.1.14, it is possible to use a node attribute
+as the +target+ in a fencing topology. The syntax for this usage is
+described below.
+
+From Pacemaker version 1.1.14, it is also possible to use regular
+expression patterns as the +target+ in a fencing topology. The configured
+fencing sequence then applies to all devices matching the pattern.
+
+Usage:
+...............
+fencing_topology <stonith_resources> [<stonith_resources> ...]
+fencing_topology <fencing_order> [<fencing_order> ...]
+
+fencing_order :: <target> <stonith_resources> [<stonith_resources> ...]
+
+stonith_resources :: <rsc>[,<rsc>...]
+target :: <node>: | attr:<node-attribute>=<value> | pattern:<pattern>
+...............
+Example:
+...............
+# Only kill the power if poison-pill fails
+fencing_topology poison-pill power
+
+# As above for node-a, but a different strategy for node-b
+fencing_topology \
+ node-a: poison-pill power \
+ node-b: ipmi serial
+
+# Fencing anything on rack 1 requires fencing via both APC 1 and 2,
+# to defeat the redundancy provided by two separate UPS units.
+fencing_topology attr:rack=1 apc01,apc02
+
+# Fencing for all machines named green.* is done using the pear
+# fencing device first, while all machines named red.* are fenced
+# using the apple fencing device first.
+fencing_topology \
+ pattern:green.* pear apple \
+ pattern:red.* apple pear
+...............
+
+[[cmdhelp_configure_filter,filter CIB objects]]
+==== `filter`
+
+This command filters the given CIB elements through an external
+program. The program should accept input on `stdin` and send
+output to `stdout` (the standard UNIX filter conventions). As
+with the `show` command, the user may choose to filter all or
+just a subset of elements.
+
+It is possible to filter the XML representation of objects, but
+this is probably not as useful as filtering the configuration
+language. The presentation is somewhat different from what would
+be displayed by the `show` command: each element is shown on a
+single line, i.e. there are no backslashes and no other
+embellishments.
+
+Don't forget to put quotes around the filter if it contains
+spaces.
+
+Usage:
+...............
+filter <prog> [xml] [<id> ...]
+filter <prog> [xml] changed
+...............
+Examples:
+...............
+filter "sed '/^primitive/s/target-role=[^ ]*//'"
+# crm configure filter "sed '/^primitive/s/target-role=[^ ]*//'"
+crm configure <<END
+ filter "sed '/threshold=\"1\"/s/=\"1\"/=\"0\"/g'"
+END
+...............
+
+.Note on quotation marks
+**************************
+Filter commands which feature a blend of quotation marks can be
+difficult to get right, especially when used directly from bash, since
+bash does its own quotation parsing. In these cases, it can be easier
+to supply the filter command as standard input. See the last example
+above.
+**************************
+
+[[cmdhelp_configure_get_property,Get property value]]
+==== `get-property`
+
+Show the value of the given property. If the value is not set, the
+command will print the default value for the property, if known.
+
+If no property name is passed to the command, the list of known
+cluster properties is printed.
+
+If the property is set multiple times, for example using multiple
+property sets with different rule expressions, the output of this
+command is undefined.
+
+Pass the argument +-t+ or +--true+ to `get-property` to translate
+the property value into +true+ or +false+. If the value is not
+set, the command will print +false+.
+
+Usage:
+...............
+get-property [-t|--true] [<name>]
+...............
+
+Example:
+...............
+get-property stonith-enabled
+get-property -t maintenance-mode
+...............
+
+[[cmdhelp_configure_graph,generate a directed graph]]
+==== `graph`
+
+Create a graphviz graphical layout from the current cluster
+configuration.
+
+Currently, only `dot` (directed graph) is supported. It is
+essentially a visualization of resource ordering.
+
+The graph may be saved to a file which can be used as source for
+various graphviz tools (by default it is displayed in the user's
+X11 session). Optionally, by specifying the format, one can also
+produce an image instead.
+
+For more or different graphviz attributes, it is possible to save
+the default set of attributes to an ini file. If this file exists
+it will always override the builtin settings. The +exportsettings+
+subcommand also prints the location of the ini file.
+
+Usage:
+...............
+graph [<gtype> [<file> [<img_format>]]]
+graph exportsettings
+
+gtype :: dot
+img_format :: `dot` output format (see the +-T+ option)
+...............
+Example:
+...............
+graph dot
+graph dot clu1.conf.dot
+graph dot clu1.conf.svg svg
+...............
+
+[[cmdhelp_configure_group,define a group]]
+==== `group`
+
+The `group` command creates a group of resources. This can be useful
+when resources depend on other resources and require that those
+resources start in order on the same node. A common use of resource
+groups is to ensure that a server and a virtual IP are located
+together, and that the virtual IP is started before the server.
+
+Grouped resources are started in the order they appear in the group,
+and stopped in the reverse order. If a resource in the group cannot
+run anywhere, resources following it in the group will not start.
+
+`group` can be passed the "container" meta attribute, to indicate that
+it is to be used to group VM resources monitored using Nagios. The
+resource referred to by the container attribute must be of type
+`ocf:heartbeat:Xen`, `ocf:heartbeat:VirtualDomain` or `ocf:heartbeat:lxc`.
+
+Usage:
+...............
+group <name> <rsc> [<rsc>...]
+ [description=<description>]
+ [meta attr_list]
+ [params attr_list]
+
+attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+...............
+Example:
+...............
+group internal_www disk0 fs0 internal_ip apache \
+ meta target_role=stopped
+
+group vm-and-services vm vm-sshd meta container="vm"
+...............
+
+[[cmdhelp_configure_load,import the CIB from a file]]
+==== `load`
+
+Load a part of configuration (or all of it) from a local file or
+a network URL. The +replace+ method replaces the current
+configuration with the one from the source. The +update+ method
+tries to import the contents into the current configuration. The
++push+ method imports the contents into the current configuration
+and removes any lines that are not present in the given
+configuration.
+The file may be a CLI file or an XML file.
+
+If the URL is `-`, the configuration is read from standard input.
+
+Usage:
+...............
+load [xml] <method> URL
+
+method :: replace | update | push
+...............
+Example:
+...............
+load xml update myfirstcib.xml
+load xml replace http://storage.big.com/cibs/bigcib.xml
+load xml push smallcib.xml
+...............
+
+[[cmdhelp_configure_location,a location preference]]
+==== `location`
+
+`location` defines the preference of nodes for the given
+resource. The location constraints consist of one or more rules
+which specify a score to be awarded if the rule matches.
+
+The resource referenced by the location constraint can be one of the
+following:
+
+* Plain resource reference: +location loc1 webserver 100: node1+
+* Resource set in curly brackets: +location loc1 { virtual-ip webserver } 100: node1+
+* Tag containing resource ids: +location loc1 tag1 100: node1+
+* Resource pattern: +location loc1 /web.*/ 100: node1+
+
+The +resource-discovery+ attribute allows probes to be selectively
+enabled or disabled per resource and node.
+
+The syntax for resource sets is described in detail for
+<<cmdhelp_configure_colocation,`colocation`>>.
+
+For more details on how to configure resource sets, see
+<<topics_Features_Resourcesets,`Syntax: Resource sets`>>.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+location <id> <rsc> [<attributes>] {<node_pref>|<rules>}
+
+rsc :: /<rsc-pattern>/
+ | { resource_sets }
+ | <rsc>
+
+attributes :: role=<role> | resource-discovery=always|never|exclusive
+
+node_pref :: <score>: <node>
+
+rules ::
+ rule [id_spec] [$role=<role>] <score>: <expression>
+ [rule [id_spec] [$role=<role>] <score>: <expression> ...]
+
+id_spec :: $id=<id> | $id-ref=<id>
+score :: <number> | <attribute> | [-]inf
+expression :: <simple_exp> [<bool_op> <simple_exp> ...]
+bool_op :: or | and
+simple_exp :: <attribute> [type:]<binary_op> <value>
+ | <unary_op> <attribute>
+ | date <date_expr>
+type :: string | version | number
+binary_op :: lt | gt | lte | gte | eq | ne
+unary_op :: defined | not_defined
+
+date_expr :: lt <end>
+ | gt <start>
+ | in start=<start> end=<end>
+ | in start=<start> <duration>
+ | spec <date_spec>
+duration|date_spec ::
+ hours=<value>
+ | monthdays=<value>
+ | weekdays=<value>
+	| yeardays=<value>
+ | months=<value>
+ | weeks=<value>
+ | years=<value>
+ | weekyears=<value>
+ | moon=<value>
+...............
+Examples:
+...............
+location conn_1 internal_www 100: node1
+
+location conn_1 internal_www \
+ rule 50: #uname eq node1 \
+ rule pingd: defined pingd
+
+location conn_2 dummy_float \
+ rule -inf: not_defined pingd or pingd number:lte 0
+
+# never probe for rsc1 on node1
+location no-probe rsc1 resource-discovery=never -inf: node1
+...............
+
+[[cmdhelp_configure_modgroup,modify group]]
+==== `modgroup`
+
+Add or remove primitives in a group. The `add` subcommand appends
+the new group member by default. Should it go elsewhere, there
+are `after` and `before` clauses.
+
+Usage:
+...............
+modgroup <id> add <id> [after <id>|before <id>]
+modgroup <id> remove <id>
+...............
+Examples:
+...............
+modgroup share1 add storage2 before share1-fs
+...............
+
+[[cmdhelp_configure_monitor,add monitor operation to a primitive]]
+==== `monitor`
+
+Monitor is by far the most common operation. It is possible to
+add it without editing the whole resource. Also, it helps keep
+long primitive definitions uncluttered. In order to make this
+command as concise as possible, less common operation attributes
+are not available. If you need them, then use the `op` part of
+the `primitive` command.
+
+Usage:
+...............
+monitor <rsc>[:<role>] <interval>[:<timeout>]
+...............
+Example:
+...............
+monitor apcfence 60m:60s
+...............
+
+Note that after executing the command, the monitor operation may
+be shown as part of the primitive definition.
+
+[[cmdhelp_configure_ms,define a master-slave resource]]
+==== `ms` (`master`)
+
+The `ms` command creates a master/slave resource type. It may contain a
+single primitive resource or one group of resources.
+
+Usage:
+...............
+ms <name> <rsc>
+ [description=<description>]
+ [meta attr_list]
+ [params attr_list]
+
+attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+...............
+Example:
+...............
+ms disk1 drbd1 \
+ meta notify=true globally-unique=false
+...............
+
+.Note on `ms` deprecation
+****************************
+From Pacemaker-2.0, the resource type referred to as "master/slave",
+"stateful", or "multi-state" is no longer a separate resource type,
+but a variation of clone, now referred to as a "promotable clone".
+For backward compatibility, configurations like the above are still
+accepted:
+...............
+clone disk1 drbd1 \
+ meta promotable=true notify=true globally-unique=false
+...............
+****************************
+
+.Note on `id-ref` usage
+****************************
+Instance or meta attributes (`params` and `meta`) may contain
+a reference to another set of attributes. In that case, no other
+attributes are allowed. Since attribute set ids, though they do
+exist, are not shown by `crm`, it is also possible to
+reference an object instead of an attribute set. `crm` will
+automatically replace such a reference with the right id:
+
+...............
+crm(live)configure# primitive a2 www-2 meta $id-ref=a1
+crm(live)configure# show a2
+primitive a2 apache \
+ meta $id-ref=a1-meta_attributes
+ [...]
+...............
+It is advisable to give meaningful names to attribute sets which
+are going to be referenced.
+****************************
+
+[[cmdhelp_configure_node,define a cluster node]]
+==== `node`
+
+The node command describes a cluster node. Nodes in the CIB are
+commonly created automatically by the CRM. Hence, you should not
+need to deal with nodes unless you also want to define node
+attributes. Note that it is also possible to manage node
+attributes at the `node` level.
+
+Usage:
+...............
+node [$id=<id>] <uname>[:<type>]
+ [description=<description>]
+ [attributes [$id=<id>] [<score>:] [rule...]
+ <param>=<value> [<param>=<value>...]] | $id-ref=<ref>
+ [utilization [$id=<id>] [<score>:] [rule...]
+ <param>=<value> [<param>=<value>...]] | $id-ref=<ref>
+
+type :: normal | member | ping | remote
+...............
+Example:
+...............
+node node1
+node big_node attributes memory=64
+...............
+
+[[cmdhelp_configure_op_defaults,set resource operations defaults]]
+==== `op_defaults`
+
+Set defaults for the operations meta attributes.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+op_defaults [$id=<set_id>] [rule ...] <option>=<value> [<option>=<value> ...]
+...............
+Example:
+...............
+op_defaults record-pending=true
+...............
+
+[[cmdhelp_configure_order,order resources]]
+==== `order`
+
+This constraint expresses the order of actions on two or more
+resources. If there are more than two resources, then the
+constraint is called a resource set.
+
+Ordered resource sets have an extra attribute to allow for sets
+of resources whose actions may run in parallel. The shell syntax
+for such sets is to put resources in parentheses.
+
+If the subsequent resource can start or promote after any one of the
+resources in a set has completed, enclose the set in brackets (+[+ and +]+).
+
+Sets cannot be nested.
+
+Three strings are reserved to specify a kind of order constraint:
++Mandatory+, +Optional+, and +Serialize+. It is preferred to use
+one of these settings instead of score. Previous versions mapped
+scores +0+ and +inf+ to keywords +advisory+ and +mandatory+.
+That is still valid but deprecated.
+
+For more details on how to configure resource sets, see
+<<topics_Features_Resourcesets,`Syntax: Resource sets`>>.
+
+Usage:
+...............
+order <id> [kind:] first then [symmetrical=<bool>]
+
+order <id> [kind:] resource_sets [symmetrical=<bool>]
+
+kind :: Mandatory | Optional | Serialize
+
+first :: <rsc>[:<action>]
+
+then :: <rsc>[:<action>]
+
+resource_sets :: resource_set [resource_set ...]
+
+resource_set :: ["["|"("] <rsc>[:<action>] [<rsc>[:<action>] ...] \
+ [attributes] ["]"|")"]
+
+attributes :: [require-all=(true|false)] [sequential=(true|false)]
+
+...............
+Example:
+...............
+order o-1 Mandatory: apache:start ip_1
+order o-2 Serialize: A ( B C )
+order o-4 first-resource then-resource
+...............
+
+[[cmdhelp_configure_primitive,define a resource]]
+==== `primitive`
+
+The primitive command describes a resource. It may be referenced
+only once in group, clone, or master-slave objects. If it's not
+referenced, then it is placed as a single resource in the CIB.
+
+Operations may be specified anonymously, as a group or by reference:
+
+* "Anonymous", as a list of +op+ specifications. Use this
+ method if you don't need to reference the set of operations
+ elsewhere. This is the most common way to define operations.
+
+* If reusing operation sets is desired, use the +operations+ keyword
+ along with an id to give the operations set a name. Use the
+ +operations+ keyword and an id-ref value set to the id of another
+ operations set, to apply the same set of operations to this
+ primitive.
+
+Operation attributes which are not recognized are saved as
+instance attributes of that operation. A typical example is
++OCF_CHECK_LEVEL+.
+
+For multistate resources, roles are specified as +role=<role>+.
+
+A template may be defined for resources which are of the same
+type and which share most of the configuration. See
+<<cmdhelp_configure_rsc_template,`rsc_template`>> for more information.
+
+Attributes containing time values, such as the +interval+ attribute on
+operations, are configured either as a plain number, which is
+interpreted as a time in seconds, or using one of the following
+suffixes:
+
+* +s+, +sec+ - time in seconds (same as no suffix)
+* +ms+, +msec+ - time in milliseconds
+* +us+, +usec+ - time in microseconds
+* +m+, +min+ - time in minutes
+* +h+, +hr+ - time in hours
+
+Usage:
+...............
+primitive <rsc> {[<class>:[<provider>:]]<type>|@<template>}
+ [description=<description>]
+ [[params] attr_list]
+ [meta attr_list]
+ [utilization attr_list]
+ [operations id_spec]
+ [op op_type [<attribute>=<value>...]
+ [[op_params] attr_list]
+ [op_meta attr_list] ...]
+
+attr_list :: [$id=<id>] [<score>:] [rule...]
+ <attr>=<val> [<attr>=<val>...]] | $id-ref=<id>
+id_spec :: $id=<id> | $id-ref=<id>
+op_type :: start | stop | monitor
+...............
+Example:
+...............
+primitive apcfence stonith:apcsmart \
+ params ttydev=/dev/ttyS0 hostlist="node1 node2" \
+ op start timeout=60s \
+ op monitor interval=30m timeout=60s
+
+primitive www8 apache \
+ configfile=/etc/apache/www8.conf \
+ operations $id-ref=apache_ops
+
+primitive db0 mysql \
+ params config=/etc/mysql/db0.conf \
+ op monitor interval=60s \
+ op monitor interval=300s OCF_CHECK_LEVEL=10
+
+primitive r0 ocf:linbit:drbd \
+ params drbd_resource=r0 \
+ op monitor role=Master interval=60s \
+ op monitor role=Slave interval=300s
+
+primitive xen0 @vm_scheme1 xmfile=/etc/xen/vm/xen0
+
+primitive mySpecialRsc Special \
+ params 3: rule #uname eq node1 interface=eth1 \
+ params 2: rule #uname eq node2 interface=eth2 port=8888 \
+ params 1: interface=eth0 port=9999
+
+primitive A ocf:pacemaker:Dummy \
+ op start \
+ op_meta 2: rule #ra-version version:gt 1.0 timeout=120s \
+ op_meta 1: timeout=60s
+...............
+
+[[cmdhelp_configure_property,set a cluster property]]
+==== `property`
+
+Set cluster configuration properties. To list the
+available cluster configuration properties, use the
+<<cmdhelp_ra_info,`ra info`>> command with +pengine+, +crmd+,
++cib+ and +stonithd+ as arguments.
+When setting the +maintenance-mode+ property, it will
+inform the user if there are nodes or resources that
+have the +maintenance+ property.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+property [<set_id>:] [rule ...] <option>=<value> [<option>=<value> ...]
+...............
+Example:
+...............
+property stonith-enabled=true
+property rule date spec years=2014 stonith-enabled=false
+...............
+
+[[cmdhelp_configure_ptest,show cluster actions if changes were committed]]
+==== `ptest` (`simulate`)
+
+Show PE (Policy Engine) motions using `ptest(8)` or
+`crm_simulate(8)`.
+
+A CIB is constructed using the current user edited configuration
+and the status from the running CIB. The resulting CIB is run
+through `ptest` (or `crm_simulate`) to show changes which would
+happen if the configuration is committed.
+
+The status section may be loaded from another source and modified
+using the <<cmdhelp_cibstatus,`cibstatus`>> level commands. In that case, the
+`ptest` command will issue a message informing the user that the
+Policy Engine graph is not calculated based on the current status
+section, and will therefore show what would happen not to the
+running cluster, but to some imaginary one.
+
+If you have graphviz installed and an X11 session, `dotty(1)` is run
+to display the changes graphically.
+
+Add a string of +v+ characters to increase verbosity. `ptest`
+can also show allocation scores. +utilization+ turns on
+information about the remaining capacity of nodes. With the
++actions+ option, `ptest` will print all resource actions.
+
+The `ptest` program has been replaced by `crm_simulate` in newer
+Pacemaker versions. In some installations both could be
+installed. Use `simulate` to enforce using `crm_simulate`.
+
+Usage:
+...............
+ptest [nograph] [v...] [scores] [actions] [utilization]
+...............
+Examples:
+...............
+ptest scores
+ptest vvvvv
+simulate actions
+...............
+
+[[cmdhelp_configure_refresh,refresh from CIB]]
+==== `refresh`
+
+Refresh the internal structures from the CIB. All changes made
+during this session are lost.
+
+Usage:
+...............
+refresh
+...............
+
+[[cmdhelp_configure_rename,rename a CIB object]]
+==== `rename`
+
+Rename an object. It is recommended to use this command to rename
+a resource, because it will take care of updating all related
+constraints and the parent resource. Changing ids with the edit
+command won't have the same effect.
+
+If you want to rename a resource, it must be in the stopped state.
+
+Usage:
+...............
+rename <old_id> <new_id>
+...............
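+Example (ids are illustrative):
+...............
+rename vip_old vip_new
+...............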
+
+[[cmdhelp_configure_role,define role access rights]]
+==== `role`
+
+An ACL role is a set of rules which describe access rights to the
+CIB. Rules consist of an access right (+read+, +write+, or +deny+)
+and a specification denoting the part of the configuration to which
+the access right applies. The specification can be an XPath or a
+combination of tag and id references. If an attribute is
+appended, then the specification applies only to that attribute
+of the matching element.
+
+There are a number of shortcuts for XPath specifications. The
++meta+, +params+, and +utilization+ shortcuts reference resource
+meta attributes, parameters, and utilization respectively. The
++location+ shortcut may be used to specify location constraints,
+most of the time to allow the resource `move` and `unmove`
+commands. The +property+ shortcut references cluster properties.
+The +node+ shortcut allows reading node attributes. +nodeattr+
+and +nodeutil+ reference node attributes and node capacity
+(utilization). The +status+ shortcut references the whole status
+section of the CIB. Read access to status is necessary for
+various monitoring tools such as `crm_mon(8)` (aka `crm status`).
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+role <role-id> rule [rule ...]
+
+rule :: acl-right cib-spec [attribute:<attribute>]
+
+acl-right :: read | write | deny
+
+cib-spec :: xpath-spec | tag-ref-spec
+xpath-spec :: xpath:<xpath> | shortcut
+tag-ref-spec :: tag:<tag> | ref:<id> | tag:<tag> ref:<id>
+
+shortcut :: meta:<rsc>[:<attr>]
+ params:<rsc>[:<attr>]
+ utilization:<rsc>
+ location:<rsc>
+ property[:<attr>]
+ node[:<node>]
+ nodeattr[:<attr>]
+ nodeutil[:<node>]
+ status
+...............
+Example:
+...............
+role app1_admin \
+ write meta:app1:target-role \
+ write meta:app1:is-managed \
+ write location:app1 \
+ read ref:app1
+...............
+
+[[cmdhelp_configure_rsc_defaults,set resource defaults]]
+==== `rsc_defaults`
+
+Set defaults for the resource meta attributes.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+rsc_defaults [<set_id>:] [rule ...] <option>=<value> [<option>=<value> ...]
+...............
+Example:
+...............
+rsc_defaults failure-timeout=3m
+...............
+
+[[cmdhelp_configure_rsc_template,define a resource template]]
+==== `rsc_template`
+
+The `rsc_template` command creates a resource template. It may be
+referenced in primitives. It is used to reduce large
+configurations with many similar resources.
+
+Usage:
+...............
+rsc_template <name> [<class>:[<provider>:]]<type>
+ [description=<description>]
+ [params attr_list]
+ [meta attr_list]
+ [utilization attr_list]
+ [operations id_spec]
+ [op op_type [<attribute>=<value>...] ...]
+
+attr_list :: [$id=<id>] <attr>=<val> [<attr>=<val>...] | $id-ref=<id>
+id_spec :: $id=<id> | $id-ref=<id>
+op_type :: start | stop | monitor
+...............
+Example:
+...............
+rsc_template public_vm Xen \
+ op start timeout=300s \
+ op stop timeout=300s \
+ op monitor interval=30s timeout=60s \
+ op migrate_from timeout=600s \
+ op migrate_to timeout=600s
+primitive xen0 @public_vm \
+ params xmfile=/etc/xen/xen0
+primitive xen1 @public_vm \
+ params xmfile=/etc/xen/xen1
+...............
+
+[[cmdhelp_configure_rsc_ticket,resources ticket dependency]]
+==== `rsc_ticket`
+
+This constraint expresses dependency of resources on cluster-wide
+attributes, also known as tickets. Tickets are mainly used in
+geo-clusters, which consist of multiple sites. A ticket may be
+granted to a site, thus allowing resources to run there.
+
+The +loss-policy+ attribute specifies what happens to the
+resource (or resources) if the ticket is revoked. The default is
+either +stop+ or +demote+ depending on whether a resource is
+multi-state.
+
+See also the <<cmdhelp_site_ticket,`site`>> set of commands.
+
+Usage:
+...............
+rsc_ticket <id> <ticket_id>: <rsc>[:<role>] [<rsc>[:<role>] ...]
+ [loss-policy=<loss_policy_action>]
+
+loss_policy_action :: stop | demote | fence | freeze
+...............
+Example:
+...............
+rsc_ticket ticket-A_public-ip ticket-A: public-ip
+rsc_ticket ticket-A_bigdb ticket-A: bigdb loss-policy=fence
+rsc_ticket ticket-B_storage ticket-B: drbd-a:Master drbd-b:Master
+...............
+
+
+[[cmdhelp_configure_rsctest,test resources as currently configured]]
+==== `rsctest`
+
+Test resources with current resource configuration. If no nodes
+are specified, tests are run on all known nodes.
+
+The order of resources is significant: it is assumed that later
+resources depend on earlier ones.
+
+If a resource is multi-state, it is assumed that the role on
+which later resources depend is master.
+
+Tests are run sequentially to prevent running the same resource
+on two or more nodes. Tests are carried out only if none of the
+specified nodes currently run any of the specified resources.
+However, it won't verify whether resources run on the other
+nodes.
+
+Superuser privileges are obviously required: either run this as
+root or set up the `sudoers` file appropriately.
+
+Note that resource testing may take some time.
+
+Usage:
+...............
+rsctest <rsc_id> [<rsc_id> ...] [<node_id> ...]
+...............
+Examples:
+...............
+rsctest my_ip websvc
+rsctest websvc nodeB
+...............
+
+[[cmdhelp_configure_save,save the CIB to a file]]
+==== `save`
+
+Save the current configuration to a file. Optionally, as XML. Use
++-+ instead of file name to write the output to `stdout`.
+
+The `save` command accepts the same selection arguments as the `show`
+command. See the <<cmdhelp_configure_show,help section>> for `show`
+for more details.
+
+Usage:
+...............
+save [xml] [<id> | type:<type> | tag:<tag> |
+ related:<obj> | changed ...] <file>
+...............
+Example:
+...............
+save myfirstcib.txt
+save web-server server-config.txt
+...............
+
+[[cmdhelp_configure_schema,set or display current CIB RNG schema]]
+==== `schema`
+
+The CIB's content is validated against an RNG schema. Pacemaker
+supports several schemas, depending on the version. At least the
+following schemas are accepted by `crmsh`:
+
+* +pacemaker-1.0+
+* +pacemaker-1.1+
+* +pacemaker-1.2+
+* +pacemaker-1.3+
+* +pacemaker-2.0+
+
+Use this command to display or switch to another RNG schema.
+
+Usage:
+...............
+schema [<schema>]
+...............
+Example:
+...............
+schema pacemaker-1.1
+...............
+
+[[cmdhelp_configure_set,set an attribute value]]
+==== `set`
+
+Set the value of a configured attribute. The attribute must be
+configured previously, and can be an agent parameter, meta attribute,
+utilization value or operation value.
+
+The first argument to the command is a path to an attribute.
+This is a dot-separated sequence beginning with the name of
+the resource or object, and ending with the name of the attribute to
+set. To set an operation value, the `op_type` must be specified; when
+multiple operations of the same type exist, such as multiple monitors,
+the `interval` must also be specified.
+
+Usage:
+...............
+set <path> <value>
+
+path:: id.[op_type.][interval.]name
+...............
+Examples:
+...............
+set vip1.ip 192.168.20.5
+set vm-a.force_stop 1
+set vip1.monitor.on-fail ignore
+set drbd.monitor.10s.interval 20s
+...............
+
+[[cmdhelp_configure_show,display CIB objects]]
+==== `show`
+
+The `show` command displays CIB objects. Without any argument, it
+displays all objects in the CIB, but the set of objects displayed by
+`show` can be limited to only objects with the given IDs or by using
+one or more of the special prefixes described below.
+
+The XML representation for the objects can be displayed by passing
++xml+ as the first argument.
+
+To show one or more specific objects, pass the object IDs as
+arguments.
+
+To show all objects of a certain type, use the +type:+ prefix.
+
+To show all objects in a tag, use the +tag:+ prefix.
+
+To show all constraints related to a primitive, use the +related:+ prefix.
+
+To show all modified objects, pass the argument +changed+.
+
+The prefixes can be used together on a single command line. For
+example, to show both the tag itself and the objects tagged by it the
+following combination can be used: +show tag:my-tag my-tag+.
+
+To refine a selection of objects using multiple modifiers, the keywords
++and+ and +or+ can be used. For example, to select all primitives tagged
++foo+, the following combination can be used:
++show type:primitive and tag:foo+.
+
+To hide values when displaying the configuration, use the
++obscure:<glob>+ argument. This can be useful when sending the
+configuration over a public channel, to avoid exposing potentially
+sensitive information. The +<glob>+ argument is a bash-style pattern
+matching attribute keys.
+
+The +obscure_pattern+ option in +/etc/crm/crm.conf+ makes this
+behaviour persistent for the CLI. For example, for high security
+requirements:
+...............
+[core]
+obscure_pattern = passw* | ip
+...............
+With this setting, +crm configure show+ is equivalent to:
+...............
+node-1:~ # crm configure show obscure:passw* obscure:ip
+node 1084783297: node1
+primitive fence_device stonith:fence_ilo5 \
+ params password="******"
+primitive ip IPaddr2 \
+ params ip="******"
+...............
+The default value is +passw*+. If you don't want to obscure
+anything, set the value to blank.
+
+Usage:
+...............
+show [xml] [<id>
+ | changed
+ | type:<type>
+ | tag:<id>
+ | related:<obj>
+ | obscure:<glob>
+ ...]
+
+type :: node | primitive | group | clone | ms | rsc_template
+ | location | colocation | order
+ | rsc_ticket
+ | property | rsc_defaults | op_defaults
+ | fencing_topology
+ | role | user | acl_target
+ | tag
+...............
+
+Example:
+...............
+show webapp
+show type:primitive
+show xml tag:db tag:fs
+show related:webapp
+show type:primitive obscure:passwd
+...............
+
+[[cmdhelp_configure_tag,Define resource tags]]
+==== `tag`
+
+Define a resource tag. A tag is an id referring to one or more
+resources, without implying any constraints between the tagged
+resources. This can be useful for grouping conceptually related
+resources.
+
+Usage:
+...............
+tag <tag-name>: <rsc> [<rsc> ...]
+tag <tag-name> <rsc> [<rsc> ...]
+...............
+Example:
+...............
+tag web: p-webserver p-vip
+tag ips server-vip admin-vip
+...............
+
+[[cmdhelp_configure_template,edit and import a configuration from a template]]
+==== `template`
+
+The specified template is loaded into the editor. It's up to the
+user to make a good CRM configuration out of it. See also the
+<<cmdhelp_template,template section>>.
+
+Usage:
+...............
+template [xml] url
+...............
+Example:
+...............
+template two-apaches.txt
+...............
+
+[[cmdhelp_configure_upgrade,upgrade the CIB]]
+==== `upgrade`
+
+Attempts to upgrade the CIB to validate with the current
+version. Commonly, this is required if the error
+`CIB not supported` occurs. It typically means that the
+active CIB version is coming from an older release.
+
+As a safety precaution, the force argument is required if the
++validate-with+ attribute is set to anything other than
++0.6+. Thus in most cases, it is required.
+
+Usage:
+...............
+upgrade [force]
+...............
+
+Example:
+...............
+upgrade force
+...............
+
+[[cmdhelp_configure_user,define user access rights]]
+==== `user`
+
+Users which normally cannot view or manage cluster configuration
+can be allowed access to parts of the CIB. The access is defined
+by a set of +read+, +write+, and +deny+ rules as in role
+definitions or by referencing roles. The latter is considered
+best practice.
+
+For more information on rule expressions, see
+<<topics_Syntax_RuleExpressions,Syntax: Rule expressions>>.
+
+Usage:
+...............
+user <uid> {roles|rules}
+
+roles :: role:<role-ref> [role:<role-ref> ...]
+rules :: rule [rule ...]
+...............
+Example:
+...............
+user joe \
+ role:app1_admin \
+ role:read_all
+...............
+
+[[cmdhelp_configure_validate_all,call agent validate-all for resource]]
+==== `validate-all`
+
+Call the `validate-all` action for the resource, if possible.
+
+Limitations:
+
+* The resource agent must implement the `validate-all` action.
+* The current user must be root.
+* The primitive resource must not use nvpair references.
+
+Usage:
+...............
+validate-all <rsc>
+...............
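+Example (assuming a primitive named +vip1+ is configured):
+...............
+validate-all vip1
+...............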
+
+
+[[cmdhelp_configure_verify,verify the CIB with crm_verify]]
+==== `verify`
+
+Verify the contents of the CIB which would be committed.
+
+Usage:
+...............
+verify
+...............
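+Example:
+...............
+verify
+...............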
+
+[[cmdhelp_configure_xml,raw xml]]
+==== `xml`
+
+Even though we promised no XML, it may happen, though hopefully
+very seldom, that an element from the CIB cannot be rendered
+in the configuration language. In that case, the element will be
+shown as raw XML, prefixed by this command. That element can then
+be edited like any other. If the shell finds that it can digest
+the element after the change, it is converted back into the
+normal configuration language. Apart from that case, there is no
+need to use `xml` for configuration.
+
+Usage:
+...............
+xml <xml>
+...............
+
+[[cmdhelp_template,edit and import a configuration from a template]]
+=== `template` - Import configuration from templates
+
+User may be assisted in the cluster configuration by templates
+prepared in advance. Templates consist of a typical ready
+configuration which may be edited to suit particular user needs.
+
+This command enters a template level where additional commands
+for configuration/template management are available.
+
+[[cmdhelp_template_apply,process and apply the current configuration to the current CIB]]
+==== `apply`
+
+Copy the current or given configuration to the current CIB. By
+default, the CIB is replaced, unless the method is set to
+"update".
+
+Usage:
+...............
+apply [<method>] [<config>]
+
+method :: replace | update
+...............
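+Example (assuming a configuration named +vip+ exists):
+...............
+apply update vip
+...............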
+
+[[cmdhelp_template_delete,delete a configuration]]
+==== `delete`
+
+Remove a configuration. The loaded (active) configuration may be
+removed by force.
+
+Usage:
+...............
+delete <config> [force]
+...............
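+Example (assuming a configuration named +vip+ exists):
+...............
+delete vip
+...............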
+
+[[cmdhelp_template_edit,edit a configuration]]
+==== `edit`
+
+Edit current or given configuration using your favourite editor.
+
+Usage:
+...............
+edit [<config>]
+...............
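+Example (assuming a configuration named +vip+ exists):
+...............
+edit vip
+...............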
+
+[[cmdhelp_template_list,list configurations/templates]]
+==== `list`
+
+When called with no argument, lists existing templates and
+configurations.
+
+Given the argument +templates+, lists the available templates.
+
+Given the argument +configs+, lists the available configurations.
+
+Usage:
+...............
+list [templates|configs]
+...............
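+Example:
+...............
+list templates
+...............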
+
+[[cmdhelp_template_load,load a configuration]]
+==== `load`
+
+Load an existing configuration. Further `edit`, `show`, and
+`apply` commands will refer to this configuration.
+
+Usage:
+...............
+load <config>
+...............
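+Example (assuming a configuration named +vip+ exists):
+...............
+load vip
+...............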
+
+[[cmdhelp_template_new,create a new configuration from templates]]
+==== `new`
+
+Create a new configuration from one or more templates. Note that
+configurations and templates are kept in different places, so it
+is possible for a configuration to have the same name as a template.
+
+If you already know which parameters are required, you can set
+them directly on the command line.
+
+The parameter name +id+ is set by default to the name of the
+configuration.
+
+If no parameters are being set and you don't want a particular name
+for your configuration, you can call this command with a template name
+as the only parameter. A unique configuration name based on the
+template name will be generated.
+
+Usage:
+...............
+new [<config>] <template> [<template> ...] [params name=value ...]
+...............
+
+Example:
+...............
+new vip virtual-ip
+new bigfs ocfs2 params device=/dev/sdx8 directory=/bigfs
+new apache
+...............
+
+[[cmdhelp_template_show,show the processed configuration]]
+==== `show`
+
+Process the current or given configuration and display the result.
+
+Usage:
+...............
+show [<config>]
+...............
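+Example (assuming a configuration named +vip+ exists):
+...............
+show vip
+...............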
+
+[[cmdhelp_cibstatus,CIB status management and editing]]
+=== `cibstatus` - CIB status management and editing
+
+The `status` section of the CIB keeps the current status of nodes
+and resources. It is modified _only_ on events, i.e. when some
+resource operation is run or node status changes. For obvious
+reasons, the CRM has no user interface with which it is possible
+to affect the status section. From the user's point of view, the
+status section is essentially a read-only part of the CIB. The
+current status is never even written to disk, though it is
+available in the PE (Policy Engine) input files which represent
+the history of cluster motions. The current status may be read
+using the +cibadmin -Q+ command.
+
+It may sometimes be of interest to see how status changes would
+affect the Policy Engine. The set of `cibstatus` level commands
+allows the user to load status sections from various sources and
+then insert or modify resource operations or change nodes' state.
+
+The effect of those changes may then be observed by running the
+<<cmdhelp_configure_ptest,`ptest`>> command at the `configure` level
+or the `simulate` and `run` commands at this level. `ptest`
+runs with the user-edited CIB, whereas the latter two commands
+run with the CIB which was loaded along with the status section.
+
+The `simulate` and `run` commands as well as all status
+modification commands are implemented using `crm_simulate(8)`.
+
+[[cmdhelp_cibstatus_load,load the CIB status section]]
+==== `load`
+
+Load a status section from a file, a shadow CIB, or the running
+cluster. By default, the current (+live+) status section is
+modified. Note that if the +live+ status section is modified it
+is not going to be updated if the cluster status changes, because
+that would overwrite the user changes. To make `crm` drop changes
+and resume use of the running cluster status, run +load live+.
+
+All CIB shadow configurations contain the status section which is
+a snapshot of the status section taken at the time the shadow was
+created. Obviously, this status section doesn't have much to do
+with the running cluster status, unless the shadow CIB has just
+been created. Therefore, the `ptest` command by default uses the
+running cluster status section.
+
+Usage:
+...............
+load {<file>|shadow:<cib>|live}
+...............
+Example:
+...............
+load bug-12299.xml
+load shadow:test1
+...............
+
+[[cmdhelp_cibstatus_node,change node status]]
+==== `node`
+
+Change the node status. It is possible to throw a node out of
+the cluster, make it a member, or set its state to unclean.
+
++online+:: Set the +node_state+ `crmd` attribute to +online+
+and the +expected+ and +join+ attributes to +member+. The effect
+is that the node becomes a cluster member.
+
++offline+:: Set the +node_state+ `crmd` attribute to +offline+
+and the +expected+ attribute to empty. This cleanly removes the
+node from the cluster.
+
++unclean+:: Set the +node_state+ `crmd` attribute to +offline+
+and the +expected+ attribute to +member+. In this case the node
+has unexpectedly disappeared.
+
+Usage:
+...............
+node <node> {online|offline|unclean}
+...............
+Example:
+...............
+node xen-b unclean
+...............
+
+[[cmdhelp_cibstatus_op,edit outcome of a resource operation]]
+==== `op`
+
+Edit the outcome of a resource operation. This way you can
+tell the CRM that it ran an operation and that the resource agent
+returned a certain exit code. It is also possible to change the
+operation's status. In case the operation status is set to
+something other than +done+, the exit code is effectively
+ignored.
+
+Usage:
+...............
+op <operation> <resource> <exit_code> [<op_status>] [<node>]
+
+operation :: probe | monitor[:<n>] | start | stop |
+ promote | demote | notify | migrate_to | migrate_from
+exit_code :: <rc> | success | generic | args |
+ unimplemented | perm | installed | configured | not_running |
+ master | failed_master
+op_status :: pending | done | cancelled | timeout | notsupported | error
+
+n :: the monitor interval in seconds; if omitted, the first
+ recurring operation is referenced
+rc :: numeric exit code in range 0..9
+...............
+Example:
+...............
+op start d1 xen-b generic
+op start d1 xen-b 1
+op monitor d1 xen-b not_running
+op stop d1 xen-b 0 timeout
+...............
+
+[[cmdhelp_cibstatus_origin,display origin of the CIB status section]]
+==== `origin`
+
+Show the origin of the status section currently in use. This
+essentially shows the latest `load` argument.
+
+Usage:
+...............
+origin
+...............
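+Example:
+...............
+origin
+...............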
+
+[[cmdhelp_cibstatus_quorum,set the quorum]]
+==== `quorum`
+
+Set the quorum value.
+
+Usage:
+...............
+quorum <bool>
+...............
+Example:
+...............
+quorum false
+...............
+
+[[cmdhelp_cibstatus_run,run policy engine]]
+==== `run`
+
+Run the policy engine with the edited status section.
+
+Add a string of +v+ characters to increase verbosity. Specify
++scores+ to also see allocation scores. +utilization+ turns on
+information about the remaining capacity of nodes.
+
+If you have graphviz installed and an X11 session, `dotty(1)` is run
+to display the changes graphically.
+
+Usage:
+...............
+run [nograph] [v...] [scores] [utilization]
+...............
+Example:
+...............
+run
+...............
+
+[[cmdhelp_cibstatus_save,save the CIB status section]]
+==== `save`
+
+The current internal status section with whatever modifications
+were performed can be saved to a file or shadow CIB.
+
+If the file exists and contains a complete CIB, only the status
+section is going to be replaced and the rest of the CIB will
+remain intact. Otherwise, the current user-edited configuration
+is saved along with the status section.
+
+Note that all modifications are saved in the source file as soon
+as they are run.
+
+Usage:
+...............
+save [<file>|shadow:<cib>]
+...............
+Example:
+...............
+save bug-12299.xml
+...............
+
+[[cmdhelp_cibstatus_show,show CIB status section]]
+==== `show`
+
+Show the current status section in the XML format. Brace yourself
+for some unreadable output. Add the +changed+ option to get a
+human-readable list of all changes.
+
+Usage:
+...............
+show [changed]
+...............
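+Example:
+...............
+show changed
+...............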
+
+[[cmdhelp_cibstatus_simulate,simulate cluster transition]]
+==== `simulate`
+
+Run the policy engine with the edited status section and simulate
+the transition.
+
+Add a string of +v+ characters to increase verbosity. Specify
++scores+ to also see allocation scores. +utilization+ turns on
+information about the remaining capacity of nodes.
+
+If you have graphviz installed and an X11 session, `dotty(1)` is run
+to display the changes graphically.
+
+Usage:
+...............
+simulate [nograph] [v...] [scores] [utilization]
+...............
+Example:
+...............
+simulate
+...............
+
+[[cmdhelp_cibstatus_ticket,manage tickets]]
+==== `ticket`
+
+Modify the ticket status. Tickets can be granted and revoked.
+Granted tickets can be activated or put in standby.
+
+Usage:
+...............
+ticket <ticket> {grant|revoke|activate|standby}
+...............
+Example:
+...............
+ticket ticketA grant
+...............
+
+[[cmdhelp_assist,Configuration assistant]]
+=== `assist` - Configuration assistant
+
+The `assist` sublevel is a collection of helper
+commands that create or modify resources and
+constraints, to simplify the creation of certain
+configurations.
+
+For more information on individual commands, see
+the help text for those commands.
+
+[[cmdhelp_assist_template,Create template for primitives]]
+==== `template`
+
+This command takes a list of primitives as argument, and creates a new
+`rsc_template` for these primitives. It can only do this if the
+primitives do not already share a template and are of the same type.
+
+Usage:
+........
+template primitive-1 primitive-2 primitive-3
+........
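+Example (assuming three primitives +vm-a+, +vm-b+ and +vm-c+ of the
+same type, with no shared template):
+........
+template vm-a vm-b vm-c
+........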
+
+[[cmdhelp_assist_weak-bond,Create a weak bond between resources]]
+==== `weak-bond`
+
+A colocation between a group of resources says that the resources
+should be located together, but it also means that those resources are
+dependent on each other. If one of the resources fails, the others
+will be restarted.
+
+If this is not desired, it is possible to circumvent this: by placing the
+resources in a non-sequential set and colocating the set with a dummy
+resource which is not monitored, the resources will be placed together
+but will have no further dependency on each other.
+
+This command creates both the constraint and the dummy resource needed
+for such a colocation.
+
+Usage:
+........
+weak-bond resource-1 resource-2
+........
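+Example (assuming resources +webserver+ and +database+ are configured):
+........
+weak-bond webserver database
+........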
+
+[[cmdhelp_maintenance,Maintenance mode commands]]
+=== `maintenance` - Maintenance mode commands
+
+Maintenance mode commands are commands that manipulate resources
+directly without going through the cluster infrastructure. Therefore,
+it is essential to ensure that the cluster does not attempt to monitor
+or manipulate the resources while these commands are being executed.
+
+To ensure this, these commands require that maintenance mode is set
+either for the particular resource, or for the whole cluster.
+
+[[cmdhelp_maintenance_action,Invoke a resource action]]
+==== `action`
+
+Invokes the given action for the resource. This is
+done directly via the resource agent, so the command must
+be issued while the cluster or the resource is in
+maintenance mode.
+
+Unless the action is `start` or `monitor`, the action must be invoked
+on the same node as where the resource is running. If the resource is
+running on multiple nodes, the command will fail.
+
+To use SSH for executing resource actions on multiple nodes, append
+`ssh` after the action name. This requires SSH access to be configured
+between the nodes and the parallax python package to be installed.
+
+Usage:
+...............
+action <rsc> <action>
+action <rsc> <action> ssh
+...............
+Example:
+...............
+action webserver reload
+action webserver monitor ssh
+...............
+
+[[cmdhelp_maintenance_off,Disable maintenance mode]]
+==== `off`
+
+Disables maintenance mode, either for the whole cluster
+or for the given resource.
+
+Usage:
+...............
+off
+off <rsc>
+...............
+Example:
+...............
+off rsc1
+...............
+
+[[cmdhelp_maintenance_on,Enable maintenance mode]]
+==== `on`
+
+Enables maintenance mode, either for the whole cluster
+or for the given resource.
+
+Usage:
+...............
+on
+on <rsc>
+...............
+Example:
+...............
+on rsc1
+...............
+
+[[cmdhelp_history,Cluster history]]
+=== `history` - Cluster history
+
+Examining Pacemaker's history is a particularly involved task. The
+number of subsystems to be considered, the complexity of the
+configuration, and the set of various information sources, most of
+which are not exactly human readable, keep the analysis of resource or
+node problems accessible only to the most knowledgeable. Or, depending
+on the point of view, to the most persistent. The following set of
+commands has been devised in the hope of making cluster history more
+accessible.
+
+Of course, looking at _all_ history could be time consuming regardless
+of how good the tools at hand are. Therefore, one should first specify
+the period to analyze. If not otherwise specified,
+the last hour is considered. Logs and other relevant information are
+collected using `crm report`. Since this process takes some time and
+we always need fresh logs, information is refreshed in a much faster
+way using the python parallax module. If +python-parallax+ is not
+found on the system, examining a live cluster is still possible --
+though not as comfortable.
+
+Apart from examining a live cluster, events may be retrieved from a
+report generated by `crm report` (see also the +-H+ option). In that
+case we assume that the period spanning the whole report needs to be
+investigated. Of course, it is still possible to further reduce the
+time range.
+
+If you have discovered an issue that you want to show someone else,
+you can use the `session pack` command to save the current session as
+a tarball, similar to those generated by `crm report`.
+
+In order to minimize the size of the tarball, and to make it easier
+for others to find the interesting events, it is recommended to limit
+the time frame which the saved session covers. This can be done using
+the `timeframe` command (example below).
+
+It is also possible to name the saved session using the `session save`
+command.
+
+Example:
+...............
+crm(live)history# limit "Jul 18 12:00" "Jul 18 12:30"
+crm(live)history# session save strange_restart
+crm(live)history# session pack
+Report saved in .../strange_restart.tar.bz2
+crm(live)history#
+...............
+
+[[cmdhelp_history_detail,set the level of detail shown]]
+==== `detail`
+
+How much detail to show from the logs. Valid detail levels are either
+`0` or `1`, where `1` is the highest detail level. The default detail
+level is `0`.
+
+Usage:
+...............
+detail <detail_level>
+
+detail_level :: small integer (defaults to 0)
+...............
+Example:
+...............
+detail 1
+...............
+
+[[cmdhelp_history_diff,cluster states/transitions difference]]
+==== `diff`
+
+A transition represents a change in cluster configuration or
+state. Use `diff` to see what has changed between two
+transitions.
+
+If you want to specify the current cluster configuration and
+status, use the string +live+.
+
+Normally, the first transition specified should be the one which
+is older, but we are not going to enforce that.
+
+Note that a single configuration update may result in more than
+one transition.
+
+Usage:
+...............
+diff <pe> <pe> [status] [html]
+
+pe :: <number>|<index>|<file>|live
+...............
+Examples:
+...............
+diff 2066 2067
+diff pe-input-2080.bz2 live status
+...............
+
+[[cmdhelp_history_events,Show events in log]]
+==== `events`
+
+By analysing the log output and looking for particular
+patterns, the `events` command helps with sifting through
+the logs to find when particular events, like resources
+changing state or node failures, may have occurred.
+
+This can be used to generate a combined list of events
+from all nodes.
+
+Usage:
+...............
+events
+...............
+
+Example:
+...............
+events
+...............
+
+[[cmdhelp_history_exclude,exclude log messages]]
+==== `exclude`
+
+If a log is infested with irrelevant messages, those messages may
+be excluded by specifying a regular expression. The regular
+expressions used are Python extended regular expressions. This command is additive.
+To drop all regular expressions, use +exclude clear+. Run
+`exclude` only to see the current list of regular expressions.
+Excludes are saved along with the history sessions.
+
+Usage:
+...............
+exclude [<regex>|clear]
+...............
+Example:
+...............
+exclude kernel.*ocfs2
+...............
+
+[[cmdhelp_history_graph,generate a directed graph from the PE file]]
+==== `graph`
+
+Create a graphviz graphical layout from the PE file (the
+transition). Every transition contains the cluster configuration
+which was active at the time. See also <<cmdhelp_configure_graph,generate a directed graph
+from configuration>>.
+
+Usage:
+...............
+graph <pe> [<gtype> [<file> [<img_format>]]]
+
+gtype :: dot
+img_format :: `dot` output format (see the +-T+ option)
+...............
+Example:
+...............
+graph -1
+graph 322 dot clu1.conf.dot
+graph 322 dot clu1.conf.svg svg
+...............
+
+[[cmdhelp_history_info,Cluster information summary]]
+==== `info`
+
+The `info` command provides a summary of the information source, which
+can be either a live cluster snapshot or a previously generated
+report.
+
+Usage:
+...............
+info
+...............
+Example:
+...............
+info
+...............
+
+[[cmdhelp_history_latest,show latest news from the cluster]]
+==== `latest`
+
+The `latest` command shows a bit of recent history, more
+precisely whatever happened since the last cluster change (the
+latest transition). If the transition is running, the shell will
+first wait until it finishes.
+
+Usage:
+...............
+latest
+...............
+Example:
+...............
+latest
+...............
+
+[[cmdhelp_history_limit,limit timeframe to be examined]]
+==== `limit` (`timeframe`)
+
+This command can be used to modify the time span to examine. All
+history commands look at events within a certain time span.
+
+For the `live` source, the default time span is the _last hour_.
+
+There is no time span limit for the `hb_report` source.
+
+The time period is parsed by the `dateutil` python module. It
+covers a wide range of date formats. For instance:
+
+- 3:00 (today at 3am)
+- 15:00 (today at 3pm)
+- 2010/9/1 2pm (September 1st 2010 at 2pm)
+
+For more examples of valid time/date statements, please refer to the
+`python-dateutil` documentation:
+
+- https://dateutil.readthedocs.org/[dateutil.readthedocs.org]
+
+If the dateutil module is not available, then the time is parsed using
+strptime and only the format printed by `date(1)` is allowed:
+
+- Tue Sep 15 20:46:27 CEST 2010
+
+Usage:
+...............
+limit [<from_time>] [<to_time>]
+...............
+Examples:
+...............
+limit 10:15
+limit 15h22m 16h
+limit "Sun 5 20:46" "Sun 5 22:00"
+...............
+
+[[cmdhelp_history_log,log content]]
+==== `log`
+
+Show messages logged on one or more nodes. Leaving out a node
+name produces combined logs of all nodes. Messages are sorted by
+time and, if the terminal emulation supports it, displayed in
+different colours depending on the node to allow for easier
+reading.
+
+The sorting key is the timestamp as written by syslog which
+normally has a maximum resolution of one second. Obviously,
+messages generated by events which share the same timestamp may
+not be sorted in the same way as they happened. Such close events
+may actually happen fairly often.
+
+Usage:
+...............
+log [<node> [<node> ...] ]
+...............
+Example:
+...............
+log node-a
+...............
+
+[[cmdhelp_history_node,node events]]
+==== `node`
+
+Show important events that happened on a node. Important events
+are node lost and join, standby and online, and fence. Use either
+node names or extended regular expressions.
+
+Usage:
+...............
+node <node> [<node> ...]
+...............
+Example:
+...............
+node node1
+...............
+
+[[cmdhelp_history_peinputs,list or get PE input files]]
+==== `peinputs`
+
+Every event in the cluster results in generating one or more
+Policy Engine (PE) files. These files describe future motions of
+resources. The files are listed as full paths in the current
+report directory. Add +v+ to also see the creation time stamps.
+
+Usage:
+...............
+peinputs [{<range>|<number>} ...] [v]
+
+range :: <n1>:<n2>
+...............
+Example:
+...............
+peinputs
+peinputs 440:444 446
+peinputs v
+...............
+
+[[cmdhelp_history_refresh,refresh live report]]
+==== `refresh`
+
+This command makes sense only for the +live+ source and makes
+`crm` collect the latest logs and other relevant information from
+the cluster nodes. If you want to make a completely new report, specify
++force+.
+
+Usage:
+...............
+refresh [force]
+...............
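+Example:
+...............
+refresh force
+...............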
+
+[[cmdhelp_history_resource,resource events]]
+==== `resource`
+
+Show actions and any failures that happened on all specified
+resources on all nodes. Normally, one gives resource names as
+arguments, but it is also possible to use extended regular
+expressions. Note that group, clone, and master/slave names are
+never logged themselves. The resource command expands
+all of these appropriately, so that clone instances or resources
+which are part of a group are shown.
+
+Usage:
+...............
+resource <rsc> [<rsc> ...]
+...............
+Example:
+...............
+resource bigdb public_ip
+resource my_.*_db2
+resource ping_clone
+...............
+
+[[cmdhelp_history_session,manage history sessions]]
+==== `session`
+
+Sometimes you may want to get back to examining a particular
+history period or bug report. In order to make that easier, the
+current settings can be saved and later retrieved.
+
+If the current history being examined comes from a live
+cluster, the logs, PE inputs, and other files are saved too,
+because they may disappear from the nodes. For existing reports
+coming from `hb_report`, only the directory location is saved
+(to save space).
+
+A history session may also be packed into a tarball which can
+then be sent to support.
+
+Leave out the subcommand to see the current session.
+
+Usage:
+...............
+session [{save|load|delete} <name> | pack [<name>] | update | list]
+...............
+Examples:
+...............
+session save bnc966622
+session load rsclost-2
+session list
+...............
+
+[[cmdhelp_history_setnodes,set the list of cluster nodes]]
+==== `setnodes`
+
+In case the host this program runs on is not part of the cluster,
+it is necessary to set the list of nodes.
+
+Usage:
+...............
+setnodes node <node> [<node> ...]
+...............
+Example:
+...............
+setnodes node_a node_b
+...............
+
+[[cmdhelp_history_show,show status or configuration of the PE input file]]
+==== `show`
+
+Every transition is saved as a PE file. Use this command to
+render that PE file either as configuration or status. The
+configuration output is the same as `crm configure show`.
+
+Usage:
+...............
+show <pe> [status]
+
+pe :: <number>|<index>|<file>|live
+...............
+Examples:
+...............
+show 2066
+show pe-input-2080.bz2 status
+...............
+
+[[cmdhelp_history_source,set source to be examined]]
+==== `source`
+
+Events to be examined can come from the current cluster or from a
+`hb_report` report. This command sets the source. `source live`
+sets the source to the running cluster and system logs. If no source
+is specified, the current source information is printed.
+
+If a report source is specified as a file reference, the file
+is unpacked in the directory where it resides. This directory
+is not removed on exit.
+
+Usage:
+...............
+source [<dir>|<file>|live]
+...............
+Examples:
+...............
+source live
+source /tmp/customer_case_22.tar.bz2
+source /tmp/customer_case_22
+source
+...............
+
+[[cmdhelp_history_transition,show transition]]
+==== `transition`
+
+This command will print actions planned by the PE and run
+graphviz (`dotty`) to display a graphical representation of the
+transition. Of course, for the latter an X11 session is required.
+This command invokes `ptest(8)` in the background.
+
+The +showdot+ subcommand runs graphviz (`dotty`) to display a
+graphical representation of the +.dot+ file which has been
+included in the report. Essentially, it shows the calculation
+produced by `pengine` which is installed on the node where the
+report was produced. In the optimal case, this output should not
+differ from the one produced by the locally installed `pengine`.
+
+The `log` subcommand shows the full log for the duration of the
+transition.
+
+A transition can also be saved to a CIB shadow for further
+analysis or use with `cib` or `configure` commands (use the
+`save` subcommand). The shadow file name defaults to the name of
+the PE input file.
+
+If the PE input file number is not provided, it defaults to the
+last one, i.e. the last transition. The last transition can also
+be referenced with number 0. If the number is negative, then the
+corresponding transition relative to the last one is chosen.
+
+If there are warning and error PE input files or different nodes
+were the DC in the observed timeframe, it may happen that PE
+input file numbers collide. In that case provide some unique part
+of the path to the file.
+
+After the `ptest` output, logs about events that happened during
+the transition are printed.
+
+The `tags` subcommand scans the logs for the transition and returns a
+list of key events during that transition. For example, the tag
++error+ will be returned if there are any errors logged during the
+transition.
+
+Usage:
+...............
+transition [<number>|<index>|<file>] [nograph] [v...] [scores] [actions] [utilization]
+transition showdot [<number>|<index>|<file>]
+transition log [<number>|<index>|<file>]
+transition save [<number>|<index>|<file> [name]]
+transition tags [<number>|<index>|<file>]
+...............
+Examples:
+...............
+transition
+transition 444
+transition -1
+transition pe-error-3.bz2
+transition node-a/pengine/pe-input-2.bz2
+transition showdot 444
+transition log
+transition save 0 enigma-22
+...............
+
+[[cmdhelp_history_transitions,List transitions]]
+==== `transitions`
+
+A transition represents a change in cluster configuration or
+state. This command lists the transitions in the current timeframe.
+
+Usage:
+...............
+transitions
+...............
+Example:
+...............
+transitions
+...............
+
+
+[[cmdhelp_history_wdiff,cluster states/transitions difference]]
+==== `wdiff`
+
+A transition represents a change in cluster configuration or
+state. Use `wdiff` to see what has changed between two
+transitions as word differences on a line-by-line basis.
+
+If you want to specify the current cluster configuration and
+status, use the string +live+.
+
+Normally, the first transition specified should be the one which
+is older, but we are not going to enforce that.
+
+Note that a single configuration update may result in more than
+one transition.
+
+Usage:
+...............
+wdiff <pe> <pe> [status]
+
+pe :: <number>|<index>|<file>|live
+...............
+Examples:
+...............
+wdiff 2066 2067
+wdiff pe-input-2080.bz2 live status
+...............
+
+[[cmdhelp_root_report,Create cluster status report]]
+=== `report`
+
+Interface to a tool for creating a cluster report. A report is an
+archive containing log files, configuration files, system information
+and other relevant data for a given time period. This is a useful tool
+for collecting data to attach to bug reports, or for detecting the
+root cause of errors resulting in resource failover, for example.
+
+See `crmsh_hb_report(8)` for more details on arguments,
+or call `crm report -h`.
+
+Usage:
+...............
+report -f {time|"cts:"testnum} [-t time] [-u user] [-l file]
+ [-n nodes] [-E files] [-p patt] [-L patt] [-e prog]
+ [-MSDZAVsvhd] [dest]
+...............
+
+Examples:
+...............
+report -f 2pm report_1
+report -f "2007/9/5 12:30" -t "2007/9/5 14:00" report_2
+report -f 1:00 -t 3:00 -l /var/log/cluster/ha-debug report_3
+report -f "09sep07 2:00" -u hbadmin report_4
+report -f 18:00 -p "usern.*" -p "admin.*" report_5
+report -f cts:133 ctstest_133
+...............
+
+=== `end` (`cd`, `up`)
+
+The `end` command ends the current level and moves the user to
+the parent level. This command is available everywhere.
+
+Usage:
+...............
+end
+...............
+
+=== `help`
+
+The `help` command prints help for the current level or for the
+specified topic (command). This command is available everywhere.
+
+Usage:
+...............
+help [<topic>]
+...............
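+Example:
+...............
+help configure
+...............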
+
+=== `quit` (`exit`, `bye`)
+
+Leave the program.
+
+BUGS
+----
+Even though all sensible configurations (and most of those that
+are not) are going to be supported by the crm shell, I suspect
+that it may still happen that certain XML constructs may confuse
+the tool. When that happens, please file a bug report.
+
+The crm shell will not try to update the objects it does not
+understand. Of course, it is always possible to edit such objects
+in the XML format.
+
+AUTHORS
+-------
+Dejan Muhamedagic, <dejan@suse.de>
+Kristoffer Gronlund <kgronlund@suse.com>
+and many OTHERS
+
+SEE ALSO
+--------
+crm_resource(8), crm_attribute(8), crm_mon(8), cib_shadow(8),
+ptest(8), dotty(1), crm_simulate(8), cibadmin(8)
+
+
+COPYING
+-------
+Copyright \(C) 2008-2013 Dejan Muhamedagic.
+Copyright \(C) 2013 Kristoffer Gronlund.
+
+Free use of this software is granted under the terms of the GNU General Public License (GPL).
+
+//////////////////////
+ vim:ts=4:sw=4:expandtab:
+//////////////////////
diff --git a/doc/website-v1/news.adoc b/doc/website-v1/news.adoc
new file mode 100644
index 0000000..9cd0d6d
--- /dev/null
+++ b/doc/website-v1/news.adoc
@@ -0,0 +1,26 @@
+= News
+
+link:/news/2017-01-31-release-3_0_0[2017-01-31 10:00]
+
+:leveloffset: 1
+
+include::news/2017-01-31-release-3_0_0.adoc[]
+
+:leveloffset: 0
+
+''''
+* link:/news/2016-09-05-release-2_2_2[2016-09-05 19:00 Releasing crmsh version 2.2.2]
+* link:/news/2016-09-02-release-2_3_1[2016-09-02 10:00 Releasing crmsh version 2.3.1]
+* link:/news/2016-09-01-release-2_1_7[2016-09-01 09:00 Announcing crmsh stable release 2.1.7]
+* link:/news/2016-08-12-release-2_3_0[2016-08-12 10:30 Releasing crmsh version 2.3.0]
+* link:/news/2016-04-28-release-2_2_1[2016-04-28 01:00 crmsh 2.2.1 and 2.1.6 are released]
+* link:/news/2016-01-15-release-2_2_0[2016-01-15 15:00 crmsh 2.2.0 is released]
+* link:/news/2016-01-12-release-2_1_5[2016-01-12 10:00 Announcing crmsh stable release 2.1.5]
+* link:/news/2015-05-25-getting-started-jp[2015-05-25 13:30 Getting Started translated to Japanese]
+* link:/news/2015-05-13-release-2_1_4[2015-05-13 15:30 Announcing crmsh stable release 2.1.4]
+* link:/news/2015-04-10-release-2_1_3[2015-04-10 12:30 Announcing crmsh stable release 2.1.3]
+* link:/news/2015-01-26-release-2_1_2[2015-01-26 11:05 Announcing crmsh release 2.1.2]
+* link:/news/2014-10-28-release-2_1_1[2014-10-29 00:20 Announcing crmsh release 2.1.1]
+* link:/news/2014-06-30-release-2_1[2014-06-30 09:00 Announcing crmsh release 2.1]
+
+link:https://savannah.nongnu.org/news/?group_id=10890[Old News Archive]
diff --git a/doc/website-v1/news/2014-06-30-release-2_1.adoc b/doc/website-v1/news/2014-06-30-release-2_1.adoc
new file mode 100644
index 0000000..4c59a90
--- /dev/null
+++ b/doc/website-v1/news/2014-06-30-release-2_1.adoc
@@ -0,0 +1,93 @@
+Announcing crmsh release 2.1
+============================
+:Author: Kristoffer Gronlund
+:Email: kgronlund@suse.com
+:Date: 2014-06-30 09:00
+
+Today we are proud to announce the release of `crmsh` version 2.1!
+This version primarily fixes all known issues found since the release
+of `crmsh` 2.0 in April, but also has some major new features.
+
+A massive thank you to everyone who has helped out with bug fixes,
+comments and contributions for this release!
+
+For a complete list of changes since the previous version, please
+refer to the changelog:
+
+* https://github.com/crmsh/crmsh/blob/2.1.0/ChangeLog
+
+Packages for several popular Linux distributions can be downloaded
+from the Stable repository at the OBS:
+
+* http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/
+
+Archives of the tagged release:
+
+* https://github.com/crmsh/crmsh/archive/2.1.0.tar.gz
+* https://github.com/crmsh/crmsh/archive/2.1.0.zip
+
+Here are some of the highlights of this release:
+
+== Rule expressions in attribute lists
+
+One of the biggest features in this release is full support for rule
+expressions wherever the XML syntax allows them.
+
+Here is an example of using rule expressions in an attribute list in
+order to set the virtual IP of an IPAddr2 resource to a different
+value on a specific node.
+
+----
+primitive vip-on-node1 IPAddr2 \
+ rule 10: #uname eq node1 ip=10.0.0.5 \
+ rule 1: ip=10.0.0.6
+----
+
+== Tags in the CIB
+
+A new feature added to Pacemaker recently is tags. This is a way
+to refer to multiple resources at once without creating any
+colocation or ordering relationship between them. For example, you
+could add all resources related to the database to a db tag, and
+then stop or start them all with a single command.
+
+----
+tag db drbd:Master fs sql-db
+----
+
+It is also possible to refer to tags in constraints.
+
+== Wildcards in show/edit
+
+The configure show and edit commands can now use glob-style
+wildcards to refer to multiple resources:
+
+----
+configure edit db-*
+----
+
+== Nvpair references
+
+Sometimes, different resources use different names for the same
+parameter. For example, an IPAddr2 may have an ip parameter that should
+be the same as a web server's server_ip parameter. By using nvpair
+references, it is possible to configure the ip in a single location.
+
+Note that this is a new feature in Pacemaker 1.1.12 and up.
+
+----
+primitive vip IPAddr2 params $my-ip:ip=192.168.0.1
+primitive www apache params @my-ip:server_ip
+----
+
+== New ACL syntax
+
+The support for Access Control Lists has been revised in Pacemaker
+1.1.12, and this release of crmsh supports the new syntax. Two new
+commands have been added: `acl_target` and `acl_group`. For more details,
+see the documentation.
+
+Thank you,
+
+Kristoffer and Dejan
+
diff --git a/doc/website-v1/news/2014-10-28-release-2_1_1.adoc b/doc/website-v1/news/2014-10-28-release-2_1_1.adoc
new file mode 100644
index 0000000..6b67f4f
--- /dev/null
+++ b/doc/website-v1/news/2014-10-28-release-2_1_1.adoc
@@ -0,0 +1,58 @@
+Announcing crmsh release 2.1.1
+==============================
+:Author: Kristoffer Gronlund
+:Email: kgronlund@suse.com
+:Date: 2014-10-29 00:20
+
+Today we are proud to announce the release of `crmsh` version 2.1.1!
+This version primarily fixes all known issues found since the release
+of `crmsh` 2.1 in June. We recommend that all users of crmsh upgrade
+to this version, especially if using Pacemaker 1.1.12 or newer.
+
+A massive thank you to everyone who has helped out with bug fixes,
+comments and contributions for this release!
+
+For a complete list of changes since the previous version, please
+refer to the changelog:
+
+* https://github.com/crmsh/crmsh/blob/2.1.1/ChangeLog
+
+Packages for several popular Linux distributions can be downloaded
+from the Stable repository at the OBS:
+
+* http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/
+
+Archives of the tagged release:
+
+* https://github.com/crmsh/crmsh/archive/2.1.1.tar.gz
+* https://github.com/crmsh/crmsh/archive/2.1.1.zip
+
+Changes since the previous release:
+
+ - cibconfig: Clean up output from crm_verify (bnc#893138)
+ - high: constants: Add acl_target and acl_group to cib_cli_map (bnc#894041)
+ - high: parse: split shortcuts into valid rules
+ - medium: Handle broken CIB in find_objects
+ - high: scripts: Handle corosync.conf without nodelist in add-node (bnc#862577)
+ - medium: config: Assign default path in all cases
+ - high: cibconfig: Generate valid CLI syntax for attribute lists (bnc#897462)
+ - high: cibconfig: Add tag:<tag> to get all resources in tag
+ - doc: Documentation for show tag:<tag>
+ - low: report: Sort list of nodes
+ - high: parse: Allow empty attribute values in nvpairs (bnc#898625)
+ - high: cibconfig: Delay reinitialization after commit
+ - low: cibconfig: Improve wording of commit prompt
+ - low: cibconfig: Fix vim modeline
+ - high: report: Find nodes for any log type (boo#900654)
+ - high: hb_report: Collect logs from journald (boo#900654)
+ - high: cibconfig: Don't crash if given an invalid pattern (bnc#901714)
+ - high: xmlutil: Filter list of referenced resources (bnc#901714)
+ - medium: ui_resource: Only act on resources (#64)
+ - medium: ui_resource: Flatten, then filter (#64)
+ - high: ui_resource: Use correct name for error function (bnc#901453)
+ - high: ui_resource: resource trace failed if operation existed (bnc#901453)
+ - Improved test suite
+
+Thank you,
+
+Kristoffer and Dejan
diff --git a/doc/website-v1/news/2015-01-26-release-2_1_2.adoc b/doc/website-v1/news/2015-01-26-release-2_1_2.adoc
new file mode 100644
index 0000000..081bf1b
--- /dev/null
+++ b/doc/website-v1/news/2015-01-26-release-2_1_2.adoc
@@ -0,0 +1,69 @@
+Announcing crmsh release 2.1.2
+==============================
+:Author: Kristoffer Gronlund
+:Email: kgronlund@suse.com
+:Date: 2015-01-26 11:05
+
+Today we are proud to announce the release of `crmsh` version 2.1.2!
+This version primarily fixes all known issues found since the release
+of `crmsh` 2.1.1 in October. We recommend that all users of crmsh upgrade
+to this version, especially if using Pacemaker 1.1.12 or newer.
+
+A massive thank you to everyone who has helped out with bug fixes,
+comments and contributions for this release!
+
+For a complete list of changes since the previous version, please
+refer to the changelog:
+
+* https://github.com/crmsh/crmsh/blob/2.1.2/ChangeLog
+
+Packages for several popular Linux distributions can be downloaded
+from the Stable repository at the OBS:
+
+* http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/
+
+Archives of the tagged release:
+
+* https://github.com/crmsh/crmsh/archive/2.1.2.tar.gz
+* https://github.com/crmsh/crmsh/archive/2.1.2.zip
+
+Changes since the previous release:
+
+ - medium: ui_resource: Set probe interval 0 if not set (bnc#905050)
+ - doc: Document probe op in resource trace (bnc#905050)
+ - high: config: Fix path to system-wide crm.conf (#67)
+ - medium: config: Fall back to /etc/crm/crmsh.conf (#67)
+ - low: cliformat: Colorize id: as identifier (boo#905338)
+ - medium: cibconfig: Don't bump epoch if stripping version
+ - medium: ui_context: Lazily import readline
+ - medium: config: Add core.ignore_missing_metadata (#68) (boo#905910)
+ - medium: cibconfig: Strip digest from v1 diffs (bnc#914098)
+ - medium: cibconfig: Detect v1 format and don't patch container changes (bnc#914098)
+ - high: xmlutil: Treat node type=member as normal (boo#904698)
+ - medium: xmlutil: Use idmgmt when creating new elements (bnc#901543)
+ - low: ui_resource: --reprobe and --refresh are deprecated (bnc#905092)
+ - doc: Document deprecation of refresh and reprobe (bnc#905092)
+ - medium: parse: Support resource-discovery in location constraints
+ - medium: Allow removing groups even if is_running (boo#905271)
+ - medium: cibconfig: Delete containers first in edits (boo#905268)
+ - medium: ui_history: Fix crash using empty object set
+ - Low: term: get rid of annying ^O in piped-to-less-R output
+ - medium: parse: Allow nvpair with no value using name= syntax (#71)
+ - medium: parse: Enable name[=value] for nvpair (#71)
+ - medium: utils: Check if path basename is less (#74)
+ - medium: utils: crm_daemon_dir is added to PATH in envsetup (#67)
+ - medium: cmd_status: Show pending if available, enable extra options
+ - high: utils: Locate binaries across sudo boundary (bnc#912483)
+ - Medium: history: match error/crit messages of pcmk 1.1.12
+ - low: ui_options: Add underscore aliases for legacy options
+ - medium: constants: Fix transition start detection
+ - medium: constants: Update transition regex (#77)
+ - medium: orderedset: Add OrderedSet type
+ - medium: cibconfig: Use orderedset to avoid reordering bugs (#79)
+ - low: xmlutil: logic bug in sanity_check_nvpairs
+ - medium: util: Don't fall back to current time
+ - medium: report: Fall back to end_ts = start_ts
+
+Thank you,
+
+Kristoffer and Dejan
diff --git a/doc/website-v1/news/2015-04-10-release-2_1_3.adoc b/doc/website-v1/news/2015-04-10-release-2_1_3.adoc
new file mode 100644
index 0000000..c186ff0
--- /dev/null
+++ b/doc/website-v1/news/2015-04-10-release-2_1_3.adoc
@@ -0,0 +1,68 @@
+Announcing crmsh stable release 2.1.3
+=====================================
+:Author: Kristoffer Gronlund
+:Email: kgronlund@suse.com
+:Date: 2015-04-10 12:30
+
+Today we are proud to announce the release of `crmsh` version 2.1.3!
+This version fixes all known issues found since the release of `crmsh`
+2.1.2 in January. We recommend that all users of crmsh upgrade
+to this version, especially if using Pacemaker 1.1.12 or newer.
+
+A massive thank you to everyone who has helped out with bug fixes,
+comments and contributions for this release!
+
+For a complete list of changes since the previous version, please
+refer to the changelog:
+
+* https://github.com/ClusterLabs/crmsh/blob/2.1.3/ChangeLog
+
+Packages for several popular Linux distributions can be downloaded
+from the Stable repository at the OBS:
+
+* http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/
+
+Archives of the tagged release:
+
+* https://github.com/ClusterLabs/crmsh/archive/2.1.3.tar.gz
+* https://github.com/ClusterLabs/crmsh/archive/2.1.3.zip
+
+Changes since the previous release:
+
+ - medium: parse: nvpair attributes with no value = <nvpair name=".."/> (#71)
+ - doc: Add link to clusterlabs.org
+ - medium: report: Convert RE exception to simpler UI output
+ - medium: report: Include transitions with configuration changes (bnc#917131)
+ - medium: config: Fix case-sensitivity for booleans
+ - medium: ra: Handle non-OCF agent meta-data better
+ - Medium: cibconf: preserve cib user attributes
+ - low: cibconfig: Improved debug output when schema change fails
+ - medium: parse: Treat pacemaker-next schema as 2.0+
+ - medium: schema: Test if node type is optional via schema
+ - medium: schema: Remove extra debug output
+ - low: pacemaker: Remove debug output
+ - medium: cibconfig: If a change results in no diff, exit silently
+ - medium: cibconfig: Allow delete of objects that don't exist without returning error code
+ - medium: cibconfig: Allow removal of non-existing elements if --force is set
+ - low: allow (0,1) as option booleans
+ - low: allow pacemaker 1.0 version detection
+ - Low: hb_report: add -Q to usage
+ - Low: hb_report: add -X option for extra ssh options
+ - doc: Move the main crmsh repository to the ClusterLabs organization on github
+ - high: ui_configure: Remove acl_group command (bnc#921056)
+ - high: cibconfig: Don't delete valid tickets when removing referenced objects (bnc#922039)
+ - high: ui_context: Wait for DC after commit, not before (#85)
+ - medium: templates: Clearer descriptions for editing templates (boo#921028)
+ - high: cibconfig: Derive id for ops from referenced resource name (boo#921028)
+ - medium: ui_template: Always generate id unless explicitly defined (boo#921028)
+ - low: template: Add 'new <template>' shortcut
+ - medium: ui_template: Make new command more robust (bnc#924641)
+ - medium: parse: Disallow location rules without resources
+ - high: parse: Don't allow constraints without applicants
+ - medium: cliformat: Escape double-quotes in nvpair values
+ - low: hb_report: Use crmsh config to find pengine/cib dirs (bsc#926377)
+ - low: main: Catch any ValueErrors that may leak through
+
+Thank you,
+
+Kristoffer and Dejan
diff --git a/doc/website-v1/news/2015-05-13-release-2_1_4.adoc b/doc/website-v1/news/2015-05-13-release-2_1_4.adoc
new file mode 100644
index 0000000..31297cf
--- /dev/null
+++ b/doc/website-v1/news/2015-05-13-release-2_1_4.adoc
@@ -0,0 +1,126 @@
+Announcing crmsh stable release 2.1.4
+=====================================
+:Author: Kristoffer Gronlund
+:Email: kgronlund@suse.com
+:Date: 2015-05-13 15:30
+
+Today we are proud to announce the release of `crmsh` version 2.1.4!
+2.1.4 is a minor bug fix release with no major issues, so users
+already running 2.1.3 are mostly fine. Instead, the main reason
+for releasing 2.1.4 is as an excuse to talk about some other things
+that are happening with crmsh!
+
+The details for this release are available below.
+
+History Guide
+~~~~~~~~~~~~~
+
+Dejan has written a guide to using the crmsh history
+command. For those who are unfamiliar with the history explorer or
+want to know more about how to use it, this guide is a great
+introduction to what it does and how to use it.
+
+History is not a new crmsh feature, but, as we failed to
+advertise it and nothing works without proper marketing, it
+probably hasn't seen a very wide use. That's surely a pity and we
+hope that this gentle history guide is going to help.
+
+So, if you use crmsh and if you need help troubleshooting
+clusters (I surely do!), take a look here:
+
+http://crmsh.github.io/history-guide/
+
+FYI, the comprehensive crmsh help also has a short description of
+the feature:
+
+........
+crm history help
+........
+
+Goes without saying: all commands are described too.
+
+If you don't use crmsh, you'll still find a lot of useful
+information in the guide, so don't skip it.
+
+Hawk Presentation
+~~~~~~~~~~~~~~~~~
+
+I presented Hawk [1] and the History Explorer interface which
+builds upon the crmsh history feature at openSUSE conf in The Hague
+earlier this month. The video of that presentation is online here:
+
+++++++++++++
+<iframe width="420" height="315" src="https://www.youtube.com/embed/mngfxzXkFLw" frameborder="0" allowfullscreen></iframe>
+++++++++++++
+
+https://www.youtube.com/watch?v=mngfxzXkFLw
+
+[1]: https://github.com/ClusterLabs/hawk
+
+
+2.2.0 Development News
+~~~~~~~~~~~~~~~~~~~~~~~
+
+While 2.1.4 is the latest stable release, I am also working on releasing
+2.2.0 which will come with a bunch of new features. I'm still working
+on some of these and not everything is in the repository yet, so
+2.2.0 is probably at least a month or so away still. I was perhaps
+a bit optimistic when I tagged RC1 back in October last year. ;)
+
+However, right now I'd like to focus on one thing that is already in
+2.2.0 and which is available if you use the development packages from
+OBS: command shorthands. This makes crmsh a lot more convenient to use
+from the command line. Basically, you can use any unambiguous subset
+of a command name to refer to that command, and crmsh will figure out
+what you mean. This may sound confusing, so an example will help with
+explaining what I mean:
+
+This is one way of showing the current cluster configuration:
+
+........
+crm configure show
+........
+
+However, now you can shorten this to the following:
+
+........
+crm cfg show
+........
+
+Other examples of shorthand are `crm rsc stop r1` or `crm st`
+for status. And of course, tab completion in bash still works for
+the shorthand variants.
+
+The examples used here are not comprehensive. crmsh is pretty clever
+at figuring out which command was intended. Download the development
+release and try it out!
+
+2.1.4 Details
+~~~~~~~~~~~~~
+
+For a complete list of changes since the previous version, please
+refer to the changelog:
+
+* https://github.com/ClusterLabs/crmsh/blob/2.1.4/ChangeLog
+
+Packages for several popular Linux distributions can be downloaded
+from the Stable repository at the OBS:
+
+* http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/
+
+Archives of the tagged release:
+
+* https://github.com/ClusterLabs/crmsh/archive/2.1.4.tar.gz
+* https://github.com/ClusterLabs/crmsh/archive/2.1.4.zip
+
+Changes since the previous release:
+
+- Medium: hb_report: use faster zypper interface if available
+- medium: ui_configure: Wait for DC when removing running resource
+- low: schema: Don't leak PacemakerError exceptions (#93)
+- parse: Don't require trailing colon in tag definitions
+- medium: utils: Allow 1/0 as boolean values for parameters
+
+Thank you,
+
+Kristoffer and Dejan
diff --git a/doc/website-v1/news/2015-05-25-getting-started-jp.adoc b/doc/website-v1/news/2015-05-25-getting-started-jp.adoc
new file mode 100644
index 0000000..c5c6759
--- /dev/null
+++ b/doc/website-v1/news/2015-05-25-getting-started-jp.adoc
@@ -0,0 +1,17 @@
+Getting Started translated to Japanese
+======================================
+:Author: Kristoffer Gronlund
+:Email: kgronlund@suse.com
+:Date: 2015-05-25 13:30
+
+Many thanks to Motoharu Kubo at 3ware for offering to translate the
+`crmsh` documentation to Japanese!
+
+The first document to be translated is the link:/start-guide/[Getting Started] guide,
+now available in Japanese at the following location:
+
+* https://blog.3ware.co.jp/2015/05/crmsh-getting-started/
+
+Thank you,
+Kristoffer and Dejan
+
diff --git a/doc/website-v1/news/2016-01-12-release-2_1_5.adoc b/doc/website-v1/news/2016-01-12-release-2_1_5.adoc
new file mode 100644
index 0000000..93a3242
--- /dev/null
+++ b/doc/website-v1/news/2016-01-12-release-2_1_5.adoc
@@ -0,0 +1,56 @@
+Announcing crmsh stable release 2.1.5
+=====================================
+:Author: Kristoffer Gronlund
+:Email: kgronlund@suse.com
+:Date: 2016-01-12 10:00
+
+Today we are proud to announce the release of `crmsh` version 2.1.5!
+This release mainly consists of bug fixes, as well as compatibility
+with Pacemaker 1.1.14.
+
+For a complete list of changes since the previous version, please
+refer to the changelog:
+
+* https://github.com/ClusterLabs/crmsh/blob/2.1.5/ChangeLog
+
+Packages for several popular Linux distributions can be downloaded
+from the Stable repository at the OBS:
+
+* http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/
+
+Archives of the tagged release:
+
+* https://github.com/ClusterLabs/crmsh/archive/2.1.5.tar.gz
+* https://github.com/ClusterLabs/crmsh/archive/2.1.5.zip
+
+Changes since the previous release:
+
+- medium: report: Try to load source as session if possible (bsc#927407)
+- medium: crm_gv: Wrap non-identifier names in quotes (bsc#931837)
+- medium: crm_gv: Improved quoting of non-identifier node names (bsc#931837)
+- medium: crm_pkg: Fix cluster init bug on RH-based systems
+- medium: hb_report: Collect logs from pacemaker.log
+- medium: constants: Add 'provides' meta attribute (bsc#936587)
+- high: parse: Add attributes to terminator set (bsc#940920)
+- Medium: cibconfig: skip sanity check for properties other than cib-bootstrap-options
+- medium: config: Add report_tool_options (bsc#917638)
+- low: main: Bash completion didn't handle sudo correctly
+- high: report: New detection to fix missing transitions (bnc#917131)
+- medium: report: Add pacemaker.log to find_node_log list (bsc#941734)
+- high: hb_report: Prefer pacemaker.log if it exists (bsc#941681)
+- high: report: Output format from pacemaker has changed (bsc#941681)
+- high: report: Update transition edge regexes (bsc#942906)
+- medium: report: Reintroduce empty transition pruning (bsc#943291)
+- medium: log_patterns: Remove reference to function name in log patterns (bsc#942906)
+- low: hb_report: Collect libqb version (bsc#943327)
+- high: parse: Fix crash when referencing score types by name (bsc#940194)
+- low: constants: Add meta attributes for remote nodes
+- low: ui_history: Swap from and to times if to < from
+- high: cibconfig: Do not fail on unknown pacemaker schemas (bsc#946893)
+- high: log_patterns_118: Update the correct set of log patterns (bsc#942906)
+- high: xmlutil: Order is significant in resource_set (bsc#955434)
+- high: cibconfig: Fix XML import bug for cloned groups (bsc#959895)
+
+Thank you,
+
+Kristoffer and Dejan
diff --git a/doc/website-v1/news/2016-01-15-release-2_2_0.adoc b/doc/website-v1/news/2016-01-15-release-2_2_0.adoc
new file mode 100644
index 0000000..664526e
--- /dev/null
+++ b/doc/website-v1/news/2016-01-15-release-2_2_0.adoc
@@ -0,0 +1,210 @@
+crmsh 2.2.0 is released
+=======================
+:Author: Kristoffer Gronlund
+:Email: kgronlund@suse.com
+:Date: 2016-01-15 15:00
+
+In June of last year, I released Release Candidate 3 of crmsh 2.2.0,
+and I honestly expected to have the final version ready no more than a
+few weeks later. Well, it took around 6 months, but now it is finally
+here!
+
+The source code can be downloaded from Github:
+
+* https://github.com/ClusterLabs/crmsh/releases/tag/2.2.0
+
+Packages for several popular Linux distributions can be downloaded
+from the Stable repository at the OBS:
+
+* http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/
+
+Archives of the tagged release:
+
+* https://github.com/ClusterLabs/crmsh/archive/2.2.0.tar.gz
+* https://github.com/ClusterLabs/crmsh/archive/2.2.0.zip
+
+There are many new features and quite a few bug fixes, but I'll try to
+describe some of the major changes below. For the full list of changes
+since version 2.1, see the ChangeLog. I am also including the list
+of changes since RC3 in the release notes below.
+
+* https://github.com/ClusterLabs/crmsh/blob/2.2.0/ChangeLog
+
+New features introduced in this version:
+
+* Support for many of the new features introduced in Pacemaker
+ 1.1.14, including improved support for Pacemaker Remote and
+ pattern-based fencing topology configurations.
+
+* The cluster scripts have been greatly improved and are now used as
+ the wizards backend for Hawk. Together with the improvements in the
+ cluster scripts, the crmsh template sublevel has been deprecated,
+ and all of the templates are now available as cluster scripts. The
+ template sublevel will be removed in a future version.
+
+* The history explorer now replaces the combined log and event files
+ from hb_report with the commands crm history log and crm history
+ events. Additionally, the info summary now highlights transitions
+ that contain error logs or other potentially interesting events.
+
+* Improved verification before committing new changes, for example
+ crmsh now warns if colocation constraints are applied to group
+ children.
+
+* resource start / stop / restart now take multiple resource
+ arguments (see the example after this list).
+
+* Added a maintenance sublevel with commands intended to simplify the
+ use of maintenance mode.
+
+* Switched to using the python parallax library instead of pssh for
+ remote node communication. Parallax is a fork of pssh with
+ additional features added specifically for using it as a python
+ library rather than a command line tool. Packages for
+ python-parallax can be downloaded from the OBS, or it can be
+ installed from PyPI.
+
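+For example, several resources can now be stopped with a single
+command (the resource names here are placeholders):
+
+........
+crm resource stop r1 r2 r3
+........
+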
+Full list of changes since 2.2.0-rc3:
+
+- medium: history: Fix live report refresh (bsc#950422) (bsc#927414)
+- medium: history: Ignore central log
+- medium: cibconfig: Detect false container children
+- low: clidisplay: Avoid crash when colorizing None
+- medium: scripts: Load single file yml scripts
+- medium: scripts: Reformat scripts to simplified form
+- medium: ui_history: Add events command (bsc#952449)
+- low: hb_report: Drop function from event patterns
+- high: cibconfig: Preserve failure through edit (bsc#959965)
+- high: cibconfig: fail if new object already exists (bsc#959965)
+- medium: ui_cib: Call crm_shadow in batch mode to avoid spawning subshell (bsc#961392)
+- high: cibconfig: Fix XML import bug for cloned groups (bsc#959895)
+- high: ui_configure: Move validate-all validation to a separate command (bsc#956442)
+- high: scripts: Don't require scripts to be an array of one element
+- medium: scripts: Enable setting category in legacy wizards (bnc#957926)
+- high: scripts: Don't delete steps from upgraded wizards (bnc#957925)
+- high: ra: Only run validate-all if current user is root
+- high: cibconfig: Call validate-all action on agent in verify (bsc#956442)
+- high: script: Fix issues found in cluster scripts
+- high: ui_ra: Add ra validate command (bsc#956442)
+- low: resource: Fix unban alias for unmigrate
+- high: ui_resource: Add constraints and operations commands
+- high: ui_resource: Enable start/stop/status for multiple resources at once (bsc#952775)
+- high: scripts: Conservatively verify scripts that modify the CIB (bsc#951954)
+- high: xmlutil: Order is significant in resource_set (bsc#955434)
+- medium: scripts: Lower copy target to string
+- doc: configure load can read from stdin
+- medium: script: (filesystem) create stopped (bsc#952670)
+- medium: scripts: Check required parameters for optional sub-steps
+- high: scripts: Eval CIB text in correct scope (bsc#952600)
+- medium: utils: Fix python 2.6 compatibility
+- medium: ui_script: Tag legacy wizards as legacy in show (bsc#952226)
+- medium: scripts: No optional steps in legacy wizards (bsc#952226)
+- high: utils: Revised time zone handling (bsc#951759)
+- high: report: Fix syslog parser regexps (bsc#951759)
+- low: constants: Tweaked graph colours
+- high: scripts: Fix DRBD script resource reference (bsc#951028)
+- low: constants: Tweaked graph colors
+- medium: report: Make transitions without end stretch to 2525
+- high: utils: Handle time zones in parse_time (bsc#949511)
+- medium: hb_report: Remove reference to function name in event patterns (bsc#942906)
+- medium: ui_script: Optionally print common params
+- medium: cibconfig: Fix sanity check for attribute-based fencing topology (#110)
+- high: cibconfig: Fix bug with node/resource collision
+- high: scripts: Determine output format of script correctly (bsc#949980)
+- doc: add explanatory comments to fencing_topology
+- doc: add missing backslash in fencing_topology example
+- doc: add missing <> to fencing_topology syntax
+- low: don't use deprecated crm_attribute -U option
+- doc: resource-discovery for location constraints
+- high: utils: Fix cluster_copy_file error when nodes provided
+- low: xmlutil: More informative message when updating resource references after rename
+- doc: fix some command syntax grammar in the man page
+- high: cibconfig: Delete constraints before resources
+- high: cibconfig: Fix bug in is_edit_valid (bsc#948547)
+- medium: hb_report: Don't cat binary logs
+- high: cibconfig: Allow node/rsc id collision in _set_update (bsc#948547)
+- low: report: Silence tar warning on early stream close
+- high: cibconfig: Allow nodes and resources with the same ID (bsc#948547)
+- high: log_patterns_118: Update the correct set of log patterns (bsc#942906)
+- low: ui_resource: Silence spurious migration non-warning from pacemaker
+- medium: config: Always fall back to /usr/bin:/usr/sbin:/bin:/sbin for programs (bsc#947818)
+- medium: report: Enable opening .xz-compressed report tarballs
+- medium: cibconfig: Only warn for grouped children in colocations (bsc#927423)
+- medium: cibconfig: Allow order constraints on group children (bsc#927423)
+- medium: cibconfig: Warn if configuring constraint on child resource (bsc#927423) (#101)
+- high: ui_node: Show remote nodes in crm node list (bsc#877962)
+- high: config: Remove config.core.supported_schemas (bsc#946893)
+- medium: report: Mark transitions with errors with a star in info output (bsc#943470)
+- low: report: Remove first transition tag regex
+- medium: report: Add transition tags command (bsc#943470)
+- low: ui_history: Better error handling and documentation for the detail command
+- low: ui_history: Swap from and to times if to < from
+- medium: cibconfig: XML parser support for node-attr fencing topology
+- medium: parse: Updated syntax for fencing-topology target attribute
+- medium: parse: Add support for node attribute as fencing topology target
+- high: scripts: Add enum type to script values
+- low: scripts: [MailTo] install mailx package
+- low: scripts: Fix typo in email type verifier
+- high: script: Fix subscript agent reference bug
+- low: constants: Add meta attributes for remote nodes
+- medium: scripts: Fix typo in lvm script
+- high: scripts: Generate actions for includes if none are defined
+- low: scripts: [virtual-ip] make lvs_support an advanced parameter
+- medium: crm_pssh: Timeout is an int (bsc#943820)
+- medium: scripts: Add MailTo script
+- low: scripts: Improved script parameter validation
+- high: parse: Fix crash when referencing score types by name (bsc#940194)
+- doc: Clarify documentation for colocations using node-attribute
+- high: ui_script: Print cached errors in json run
+- medium: scripts: Use --no option over --force unless force: true is set in the script
+- medium: options: Add --no option
+- high: scripts: Default to passing --force to crm after all
+- high: scripts: Add force parameter to cib and crm actions, and don't pass --force by default
+- low: scripts: Make virtual IP optional [nfsserver]
+- medium: scripts: Ensure that the Filesystem resource exists [nfsserver] (bsc#898658)
+- medium: report: Reintroduce empty transition pruning (bsc#943291)
+- low: hb_report: Collect libqb version (bsc#943327)
+- medium: log_patterns: Remove reference to function name in log patterns (bsc#942906)
+- low: hb_report: Increase time to wait for the logmark
+- high: hb_report: Always prefer syslog if available (bsc#942906)
+- high: report: Update transition edge regexes (bsc#942906)
+- medium: scripts: Switch install default to false
+- low: scripts: Catch attempt to pass dict as parameter value
+- high: report: Output format from pacemaker has changed (bsc#941681)
+- high: hb_report: Prefer pacemaker.log if it exists (bsc#941681)
+- medium: report: Add pacemaker.log to find_node_log list (bsc#941734)
+- high: hb_report: Correct path to hb_report after move to subdirectory (bsc#936026)
+- low: main: Bash completion didn't handle sudo correctly
+- medium: config: Add report_tool_options (bsc#917638)
+- high: parse: Add attributes to terminator set (bsc#940920)
+- Medium: cibconfig: skip sanity check for properties other than cib-bootstrap-options
+- medium: ui_script: Fix bug in verify json encoding
+- low: ui_script: Check JSON command syntax
+- medium: ui_script: Add name to action output (fate#318211)
+- low: scripts: Preserve formatting of longdescs
+- low: scripts: Clearer shortdesc for filesystem
+- low: scripts: Fix formatting for SAP scripts
+- low: scripts: add missing type annotations to libvirt script
+- low: scripts: make overridden parameters non-advanced by default
+- low: scripts: Tweak description for libvirt
+- low: scripts: Strip shortdesc for scripts and params
+- low: scripts: Title and category for exportfs
+- high: ui_script: drop end sentinel from API output (fate#318211)
+- low: scripts: Fix possible reference error in agent include
+- low: scripts: Clearer error message
+- low: Remove build revision from version
+- low: Add HAProxy script to data manifest
+- medium: constants: Add 'provides' meta attribute (bsc#936587)
+- medium: scripts: Add HAProxy script
+- high: hb_report: find utility scripts after move (bsc#936026)
+- high: ui_report: Move hb_report to subdirectory (bsc#936026)
+- high: Makefile: Don't unstall hb_report using data-manifest (bsc#936026)
+- medium: report: Fall back to cluster-glue hb_report if necessary (bsc#936026)
+- medium: scripts: stop inserting comments as values
+- high: scripts: subscript values not required if subscript has no parameters / all defaults (fate#318211)
+- medium: scripts: Fix name override for subscripts (fate#318211)
+- low: scripts: Clean up generated CIB (fate#318211)
+
+As usual, a huge thank you to all contributors and users of crmsh!
+
+Cheers,
+Kristoffer
diff --git a/doc/website-v1/news/2016-04-28-release-2_2_1.adoc b/doc/website-v1/news/2016-04-28-release-2_2_1.adoc
new file mode 100644
index 0000000..e162b2f
--- /dev/null
+++ b/doc/website-v1/news/2016-04-28-release-2_2_1.adoc
@@ -0,0 +1,73 @@
+crmsh 2.2.1 and 2.1.6 are released
+==================================
+:Author: Kristoffer Gronlund
+:Email: kgronlund@suse.com
+:Date: 2016-04-28 01:00
+
+Today I'm announcing two new releases of crmsh: 2.2.1 and 2.1.6.
+
+Both are stable releases, and I recommend that all users upgrade to
+crmsh 2.2.1 if they can.
+
+== crmsh 2.2.1
+
+Here is a brief list of the most significant changes in this release:
+
+* Rewrote the history explorer internals, significantly improving performance
+* Allow configuring reload operation
+* Fix fencing for remote nodes
+* Recycle corosync node IDs when possible
+* Several bug fixes in crm report
+* Warn if generated report is empty
+* Improved SBD cluster script
+* Add push method for configure load
+* Fixed cluster init
+
+The source code can be downloaded from Github:
+
+* https://github.com/ClusterLabs/crmsh/releases/tag/2.2.1
+
+Packages for several popular Linux distributions can be downloaded
+from the Stable repository at the OBS:
+
+* http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/
+
+Archives of the tagged release:
+
+* https://github.com/ClusterLabs/crmsh/archive/2.2.1.tar.gz
+* https://github.com/ClusterLabs/crmsh/archive/2.2.1.zip
+
+For the full list of changes since version 2.2.0, see the ChangeLog,
+available at:
+
+* https://github.com/ClusterLabs/crmsh/blob/2.2.1/ChangeLog
+
+== crmsh 2.1.6
+
+This is a bug fix release.
+Most fixes are minor or related to time handling in reports.
+
+For a complete list of changes since the previous version, please
+refer to the changelog:
+
+* https://github.com/ClusterLabs/crmsh/blob/2.1.6/ChangeLog
+
+Currently I'm not building binary releases for 2.1.6 as the stable
+series (at OBS) is at 2.2. This release is intended for users of
+the 2.1 series who have yet to migrate to 2.2.
+
+Archives of the tagged release:
+
+* https://github.com/ClusterLabs/crmsh/archive/2.1.6.tar.gz
+* https://github.com/ClusterLabs/crmsh/archive/2.1.6.zip
+
+For the full list of changes since version 2.1.5, see the ChangeLog,
+available at:
+
+* https://github.com/ClusterLabs/crmsh/blob/2.1.6/ChangeLog
+
+
+As usual, a huge thank you to all contributors and users of crmsh!
+
+Cheers,
+Kristoffer
diff --git a/doc/website-v1/news/2016-08-12-release-2_3_0.adoc b/doc/website-v1/news/2016-08-12-release-2_3_0.adoc
new file mode 100644
index 0000000..bb3efa9
--- /dev/null
+++ b/doc/website-v1/news/2016-08-12-release-2_3_0.adoc
@@ -0,0 +1,76 @@
+Releasing crmsh version 2.3.0
+=============================
+:Author: Kristoffer Gronlund
+:Email: kgronlund@suse.com
+:Date: 2016-08-12 10:30
+
+Hello everyone!
+
+I am proud to present crmsh version 2.3.0, the latest stable
+release. I would recommend all users to upgrade to 2.3.0 if they
+can.
+
+For this release, I would like to begin by highlighting the new
+contributors to crmsh since 2.2.0 was released in January:
+
+* Marc A. Smith added the new subcommand "configure load push", which
+ removes any configuration lines that aren't included in the CIB
+ provided when pushing.
+
+* Andrei Maruha added an optional name parameter to the "corosync
+ add-node" command, and made the add-node command recycle old node
+ IDs if possible.
+
+* Kai Kang fixed a build system bug when removing generated docs,
+ causing issues with parallel make.
+
+* Daniel Hoffend contributed various fixes improving support for
+ building crmsh for Debian and Ubuntu.
+
+* Pedro Salgado fixed a bug in the graph rendering code in crmsh,
+ added a tox configuration file to make testing with multiple
+ versions of Python easy, and updated the Travis CI configuration to
+ use tox.
+
+* Nate Clark fixed a bug in the parser for fencing hierarchies.
+
+I would also like to thank all the other contributors, testers and
+users who have helped in making this release as stable and reliable as
+possible.
+
+Some of the other major features in 2.3.0 include:
+
+* Support for the new event-based alerts feature in Pacemaker 1.1.15
+
+* Greatly improved timezone handling in crm report and the history
+ explorer
+
+* Improvements to the cluster scripts / wizards, as well as new
+ wizards for LVM on DRBD, and NFS on LVM and DRBD and VMware/vCenter
+
+* Better support for fencing remote nodes
+
+The source code can be downloaded from Github:
+
+* https://github.com/ClusterLabs/crmsh/releases/tag/2.3.0
+
+Packages for several popular Linux distributions can be downloaded
+from the Stable repository at the OBS:
+
+* http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/
+
+Archives of the tagged release:
+
+* https://github.com/ClusterLabs/crmsh/archive/2.3.0.tar.gz
+* https://github.com/ClusterLabs/crmsh/archive/2.3.0.zip
+
+For the full list of changes since version 2.2.0, see the ChangeLog,
+available at:
+
+* https://github.com/ClusterLabs/crmsh/blob/2.3.0/ChangeLog
+
+
+As usual, a huge thank you to all contributors and users of crmsh!
+
+Cheers,
+Kristoffer
diff --git a/doc/website-v1/news/2016-09-01-release-2_1_7.adoc b/doc/website-v1/news/2016-09-01-release-2_1_7.adoc
new file mode 100644
index 0000000..d1e477e
--- /dev/null
+++ b/doc/website-v1/news/2016-09-01-release-2_1_7.adoc
@@ -0,0 +1,46 @@
+Announcing crmsh stable release 2.1.7
+=====================================
+:Author: Kristoffer Gronlund
+:Email: kgronlund@suse.com
+:Date: 2016-09-01 09:00
+
+Today I am proud to announce the release of `crmsh` version 2.1.7!
+The major new thing in this release is a backport of the event-based
+alerts support from the 2.3 branch.
+
+Big thanks to Hideo Yamauchi for his patience and testing of the
+alerts backport.
+
+This time, the list of changes is small enough that I can add it right
+here:
+
+- high: parse: Backport of event-driven alerts parser (#150)
+- high: hb_report: Don't collect logs from journalctl if -M is set (bsc#990025)
+- high: hb_report: Skip lines without timestamps in log correctly (bsc#989810)
+- high: constants: Add maintenance to set of known attributes (bsc#981659)
+- high: utils: Avoid deadlock if DC changes during idle wait (bsc#978480)
+- medium: scripts: no-quorum-policy=ignore is deprecated (bsc#981056)
+- low: cibconfig: Don't mix up CLI name with XML tag
+
+You can also get the list of changes from the changelog:
+
+* https://github.com/ClusterLabs/crmsh/blob/2.1.7/ChangeLog
+
+Right now, I don't have a set of pre-built rpm packages for Linux
+distributions ready, but I am going to make them available soon. This
+is particularly relevant for CentOS 6.x, which still relies on Python
+2.6, making it more difficult to run the later releases there.
+These packages will most likely appear as a subrepository
+here (more details coming soon):
+
+* http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/
+
+Archives of the tagged release:
+
+* https://github.com/ClusterLabs/crmsh/archive/2.1.7.tar.gz
+* https://github.com/ClusterLabs/crmsh/archive/2.1.7.zip
+
+
+Thank you,
+
+Kristoffer
diff --git a/doc/website-v1/news/2016-09-02-release-2_3_1.adoc b/doc/website-v1/news/2016-09-02-release-2_3_1.adoc
new file mode 100644
index 0000000..2a90bef
--- /dev/null
+++ b/doc/website-v1/news/2016-09-02-release-2_3_1.adoc
@@ -0,0 +1,33 @@
+Releasing crmsh version 2.3.1
+=============================
+:Author: Kristoffer Gronlund
+:Email: kgronlund@suse.com
+:Date: 2016-09-02 10:00
+
+Hello everyone!
+
+Today I am releasing crmsh version 2.3.1. The only change this time is
+to lower the Python version requirement from 2.7 to 2.6. This is so
+that crmsh remains compatible with CentOS 6, where there is no
+standardized Python 2.7 version available. For users of other
+distributions where Python 2.7 is available, there are no other
+changes in this release and no need to upgrade.
+
+The source code can be downloaded from Github:
+
+* https://github.com/ClusterLabs/crmsh/releases/tag/2.3.1
+
+Packages for several popular Linux distributions can be downloaded
+from the Stable repository at the OBS:
+
+* http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/
+
+Archives of the tagged release:
+
+* https://github.com/ClusterLabs/crmsh/archive/2.3.1.tar.gz
+* https://github.com/ClusterLabs/crmsh/archive/2.3.1.zip
+
+As usual, a huge thank you to all contributors and users of crmsh!
+
+Cheers,
+Kristoffer
diff --git a/doc/website-v1/news/2016-09-05-release-2_2_2.adoc b/doc/website-v1/news/2016-09-05-release-2_2_2.adoc
new file mode 100644
index 0000000..9816b3c
--- /dev/null
+++ b/doc/website-v1/news/2016-09-05-release-2_2_2.adoc
@@ -0,0 +1,36 @@
+Releasing crmsh version 2.2.2
+=============================
+:Author: Kristoffer Gronlund
+:Email: kgronlund@suse.com
+:Date: 2016-09-05 19:00
+
+Hello everyone!
+
+Today I am releasing crmsh version 2.2.2. The biggest change in this
+release is the backport of the support for event-based alerts from the
+2.3 branch. The full list of changes follows below:
+
+- high: parse: Backport of event-driven alerts parser (#150)
+- high: hb_report: Don't collect logs from journalctl if -M is set (bsc#990025)
+- high: hb_report: Skip lines without timestamps in log correctly (bsc#989810)
+- high: constants: Add maintenance to set of known attributes (bsc#981659)
+- high: utils: Avoid deadlock if DC changes during idle wait (bsc#978480)
+- medium: scripts: no-quorum-policy=ignore is deprecated (bsc#981056)
+- medium: tmpfiles: Create temporary directory if non-existing (bsc#981583)
+- medium: xmlutil: reduce unknown attribute to warning (bsc#981659)
+- medium: ui_resource: Add force argument to resource cleanup (bsc#979420)
+- parse: Use original _TARGET_RE
+
+The source code can be downloaded from Github:
+
+* https://github.com/ClusterLabs/crmsh/releases/tag/2.2.2
+
+Archives of the tagged release:
+
+* https://github.com/ClusterLabs/crmsh/archive/2.2.2.tar.gz
+* https://github.com/ClusterLabs/crmsh/archive/2.2.2.zip
+
+As usual, a huge thank you to all contributors and users of crmsh!
+
+Cheers,
+Kristoffer
diff --git a/doc/website-v1/news/2017-01-31-release-3_0_0.adoc b/doc/website-v1/news/2017-01-31-release-3_0_0.adoc
new file mode 100644
index 0000000..31eacea
--- /dev/null
+++ b/doc/website-v1/news/2017-01-31-release-3_0_0.adoc
@@ -0,0 +1,48 @@
+Releasing crmsh version 3.0.0
+=============================
+:Author: Kristoffer Gronlund
+:Email: kgronlund@suse.com
+:Date: 2017-01-31 10:00
+
+Hello everyone!
+
+I'm happy to announce the release of crmsh version 3.0.0 today. The
+main reason for the major version bump is that I have merged the
+sleha-bootstrap project into crmsh, replacing the cluster
+init/add/remove commands with the corresponding commands from
+sleha-bootstrap.
+
+At the moment, these commands are highly specific to SLE and openSUSE,
+unfortunately. I am working on making them as distribution agnostic as
+possible, but would appreciate help from users of other distributions
+in making them work as well on those platforms as they do on
+SLE/openSUSE.
+
+Briefly, the "cluster init" command configures a complete cluster from
+scratch, including optional configuration of fencing via SBD, shared
+storage using OCFS2, setting up the Hawk web interface etc.
+
+There are some other changes in this release as well, see the
+ChangeLog for the complete list of changes:
+
+* https://github.com/ClusterLabs/crmsh/blob/3.0.0/ChangeLog
+
+The source code can be downloaded from Github:
+
+* https://github.com/ClusterLabs/crmsh/releases/tag/3.0.0
+
+This version of crmsh will be available in openSUSE Tumbleweed as soon
+as possible, and packages for several popular Linux distributions are
+available from the Stable repository at the OBS:
+
+* http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/
+
+Archives of the tagged release:
+
+* https://github.com/ClusterLabs/crmsh/archive/3.0.0.tar.gz
+* https://github.com/ClusterLabs/crmsh/archive/3.0.0.zip
+
+As usual, a huge thank you to all contributors and users of crmsh!
+
+Cheers,
+Kristoffer
diff --git a/doc/website-v1/news/2021-06-17-release-4_3_1.adoc b/doc/website-v1/news/2021-06-17-release-4_3_1.adoc
new file mode 100644
index 0000000..1b6a4a6
--- /dev/null
+++ b/doc/website-v1/news/2021-06-17-release-4_3_1.adoc
@@ -0,0 +1,55 @@
+Releasing crmsh version 4.3.1
+=============================
+:Author: Xin Liang
+:Email: XLiang@suse.com
+:Date: 2021-06-17 11:00
+
+Hello everyone!
+
+I'm happy to announce the release of crmsh version 4.3.1.
+
+Major changes since 4.3.0:
+
+Features:
+
+* Add "crm cluster crash_test" for cluster failure simulation (#825)
+
+* Add ocfs2.OCFS2Manager to manage ocfs2 stage process with cluster lvm2 (#798)
+
+* Support setting up SBD via the bootstrap "sbd" stage on an existing cluster (#744)
+
+* Enable configuring qdevice in interactive mode (#765)
+
+Fixes:
+
+* Adjust sbd watchdog timeout when using diskless SBD with qdevice (#818)
+
+* Disallow setting a property to an empty value (#817)
+
+* Keep "help <sub-command>" and "<sub-command> -h" consistent for commands using argparse (#644)
+
+* Sync corosync.conf before finishing the join process (#775)
+
+* Adjust the qdevice configure/remove process to avoid a race condition due to lost quorum (#741)
+
+* Run the hb_report process as the hacluster user (#742)
+
+There are some other changes in this release as well, see the
+ChangeLog for the complete list of changes:
+
+* https://github.com/ClusterLabs/crmsh/blob/master/ChangeLog
+
+The source code can be downloaded from Github:
+
+* https://github.com/ClusterLabs/crmsh/releases/tag/4.3.1
+
+Development packages for openSUSE Tumbleweed
+are available from the Open Build System, here:
+
+* https://build.opensuse.org/package/show/network:ha-clustering:Factory/crmsh
+
+As usual, a huge thank you to all contributors and users of crmsh!
+
+
+Regards,
+xin
diff --git a/doc/website-v1/postprocess.py b/doc/website-v1/postprocess.py
new file mode 100644
index 0000000..859abaa
--- /dev/null
+++ b/doc/website-v1/postprocess.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python3
+# create a table of contents for pages that need it
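+#
+# Usage: postprocess.py [-d] -o OUTPUT INPUT
+#   INPUT   file to read TOC metadata from (lines starting with '[[')
+#   OUTPUT  rendered HTML page; the TOC is injected after the
+#           '<!--TOC-->' marker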
+
+import re
+import argparse
+
+TOC_PAGES = ['man/index.html',
+             'man-4.3/index.html',
+             'man-3/index.html',
+             'man-2.0/index.html',
+             'man-1.2/index.html']
+V2_PAGES = ['index.html']
+INSERT_AFTER = '<!--TOC-->'
+
+
+def read_toc_data(infile, debug):
+    # Parse anchor lines of the form "[[id,short help]]" from the
+    # source and build TOC entries for topics and commands.
+    topics_data = []
+    commands_data = []
+    with open(infile) as f:
+        for line in f:
+            if line.startswith('[['):
+                line = line[2:-3]  # strip [[ and ]]\n
+                info, short_help = line.split(',', 1)
+                short_help = short_help.strip()
+                info_split = info.split('_')
+                if info_split[0] == 'topics':
+                    if len(info_split) == 2:
+                        topics_data.append((1, short_help, info))
+                    elif len(info_split) >= 3:
+                        topics_data.append((2, short_help, info))
+                elif info_split[0] == 'cmdhelp':
+                    if len(info_split) == 2:
+                        commands_data.append((2, info_split[1], info))
+                    elif len(info_split) >= 3:
+                        commands_data.append((3, '_'.join(info_split[2:]), info))
+    toc = ''
+    if topics_data or commands_data:
+        toc = '<div id="toc">\n'
+        for depth, text, link in topics_data + commands_data:
+            toc += '<div class="toclevel%s"><a href="#%s">%s</a></div>\n' % (
+                depth, link, text)
+        toc += '</div>\n'
+    return toc
+
+
+def generate_toc(infile, outfile, debug):
+    if debug:
+        print("Infile:", infile)
+    toc = read_toc_data(infile, debug)
+
+    # Write TOC to outfile, after the INSERT_AFTER marker
+    if outfile:
+        if debug:
+            print("Writing TOC:")
+            print("----")
+            print(toc)
+            print("----")
+            print("Outfile:", outfile)
+        with open(outfile) as fil:
+            lines = fil.readlines()
+        with open(outfile, 'w') as f2:
+            for line in lines:
+                f2.write(line)
+                if toc and line.startswith(INSERT_AFTER):
+                    f2.write(toc)
+
+
+def generate_v2(page, debug):
+    # Build a TOC from the header tags already present in the rendered
+    # page, then inject it after the INSERT_AFTER marker.
+    with open(page) as fil:
+        lines = fil.readlines()
+    toc_data = []
+    section = re.compile(r"<h(?P<depth>[0-9])( id=\"(?P<id>[^\"]+)\")?>(?P<text>.*)</h[0-9]>")
+    for line in lines:
+        m = section.match(line)
+        if m:
+            if debug:
+                print("toc_data: %s" % str((m.group('depth'), m.group('text'), m.group('id'))))
+            toc_data.append((m.group('depth'), m.group('text'), m.group('id')))
+
+    toc = ''
+    if toc_data:
+        toc = '<div id="toc">\n'
+        for depth, text, link in toc_data:
+            if int(depth) >= 2 and link is not None:
+                toc += '<div class="toclevel%s"><a href="#%s">%s</a></div>\n' % (
+                    int(depth) - 1, link, text)
+        toc += '</div>\n'
+    with open(page, 'w') as f2:
+        for line in lines:
+            f2.write(line)
+            if toc and line.startswith(INSERT_AFTER):
+                f2.write(toc)
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Generate table of contents")
+    parser.add_argument('-d', '--debug', dest='debug', action='store_true',
+                        help="Enable debug output")
+    parser.add_argument('-o', '--output', metavar='output', type=str,
+                        help="File to inject TOC into")
+    parser.add_argument('input', metavar='input', type=str,
+                        help="File to read TOC metadata from")
+    args = parser.parse_args()
+    debug = args.debug
+    outfile = args.output
+    infile = args.input
+    print("+ %s -> %s" % (infile, outfile))
+    gen = False
+    for tocpage in TOC_PAGES:
+        if not gen and outfile.endswith(tocpage):
+            generate_toc(infile, outfile, debug)
+            gen = True
+    for tocpage in V2_PAGES:
+        if not gen and outfile.endswith(tocpage):
+            generate_v2(outfile, debug)
+            gen = True
+
+
+if __name__ == "__main__":
+    main()
diff --git a/doc/website-v1/rsctest-guide.adoc b/doc/website-v1/rsctest-guide.adoc
new file mode 100644
index 0000000..2dcd865
--- /dev/null
+++ b/doc/website-v1/rsctest-guide.adoc
@@ -0,0 +1,238 @@
+= Resource testing =
+
+Never created a pacemaker cluster configuration before? Please
+read on.
+
+Ever created a pacemaker configuration without errors? All
+resources worked from the get go on all your nodes? Really? We
+want a photo of you!
+
+Seriously, getting a cluster resource definition right is so error
+prone that I think I only ever managed to do it with `Dummy`
+resources. There are many intricate details that have to be
+just right, and all of them are stuffed in a single place as simple
+name-value attributes. Then there are multiple nodes, each node
+containing a complex system environment that is inevitably in flux
+(entropy, anybody?).
+
+Now, once you have defined your set of resources and are about to
+_commit_ the configuration (it usually takes a deep breath to do
+so at that point), be ready to meet an avalanche of error
+messages, not all of which are easy to understand or follow. Not
+to mention that you need to read the logs too. Even though we do
+have a link:history-tutorial.html[tool] to help with digging through
+the logs, it is going to be an interesting experience and not quite
+recommended if you're just starting with pacemaker clusters. Even the
+experts can save a lot of time and headaches by following the advice
+below.
+
+== Basic usage ==
+
+Enter resource testing. It is a special feature designed to help
+users find problems in resource configurations.
+
+The usage is very simple:
+
+----
+crm(live)configure# rsctest web-server
+Probing resources ..
+testing on xen-f: apache web-ip
+testing on xen-g: apache web-ip
+crm(live)configure#
+----
+
+What actually happened above and what is it good for? From the
+output we can infer that the `web-server` resource is actually a
+group comprising one apache web server and one IP address.
+Indeed:
+
+----
+crm(live)configure# show web-server
+group web-server apache web-ip \
+ meta target-role="Stopped"
+crm(live)configure#
+----
+
+The `rsctest` command first establishes that the resources are
+stopped on all nodes in the cluster. Then it tests the resources
+on all nodes, in the order defined by the resource group. It does
+this by manually starting the resources, one by one, then running
+a "monitor" for each resource to make sure that the resources are
+healthy, and finally stopping the resources in reverse order.
+
+Since there is no additional output, the test passed. It looks
+like we have a properly defined web server group.
+
+== Reporting problems ==
+
+Now, the above run was not very interesting so let's spoil the
+idyll:
+
+----
+xen-f:~ # mv /etc/apache2/httpd.conf /tmp
+----
+
+We moved the apache configuration file away on node `xen-f`. The
+`apache` resource should fail now:
+
+----
+crm(live)configure# rsctest web-server
+Probing resources ..
+testing on xen-f: apache
+host xen-f (exit code 5)
+xen-f stderr:
+2013/10/17_16:51:26 ERROR: Configuration file /etc/apache2/httpd.conf not found!
+2013/10/17_16:51:26 ERROR: environment is invalid, resource considered stopped
+
+testing on xen-g: apache web-ip
+crm(live)configure#
+----
+
+As expected, `apache` failed to start on node `xen-f`. When the
+cluster resource manager runs an operation on a resource, all
+messages are logged (there is no terminal attached to the
+cluster, anyway). All one can see in the resource status is the type
+of the exit code. In this case, it is an installation problem.
+
+For instance, the output could look like this:
+
+----
+xen-f:~ # crm status
+Last updated: Thu Oct 17 19:21:44 2013
+Last change: Thu Oct 17 19:21:28 2013 by root via crm_resource on xen-f
+...
+Failed actions:
+ apache_start_0 on xen-f 'not installed' (5): call=2074, status=complete,
+last-rc-change='Thu Oct 17 19:21:31 2013', queued=164ms, exec=0ms
+----
+
+That does not look very informative. With `rsctest` we can
+immediately see what the problem is. It saves us prowling the
+logs looking for messages from the `apache` resource agent.
+
+Note that the IP address is not tested, because the resource it
+depends on could not be started.
+
+== What is tested? ==
+
+The start, monitor, and stop operations, in exactly that order,
+are tested for every resource specified. Note that the latter two
+operations should normally never fail if the resource agent is
+well implemented. The RA should under normal circumstances be
+able to stop or monitor a started resource. However, this is
+_not_ a replacement for resource agent testing. If that is what
+you are looking for, see
+http://www.linux-ha.org/doc/dev-guides/_testing_resource_agents.html[the
+RA testing chapter] of the RA development guide.
+
+== Protecting resources ==
+
+The `rsctest` command goes to great lengths to prevent starting a
+resource on more than one node at the same time. For some resources
+that would actually mean data corruption, and we certainly don't
+want that to happen.
+
+----
+xen-f:~ # (echo start web-server; echo show web-server) | crm -w resource
+resource web-server is running on: xen-g
+xen-f:~ # crm configure rsctest web-server
+Probing resources .WARNING: apache:probe: resource running at xen-g
+.WARNING: web-ip:probe: resource running at xen-g
+
+Stop all resources before testing!
+xen-f:~ # crm configure rsctest web-server xen-f
+Probing resources .WARNING: apache:probe: resource running at xen-g
+.WARNING: web-ip:probe: resource running at xen-g
+
+Stop all resources before testing!
+xen-f:~ #
+----
+
+As you can see, if `rsctest` finds any of the resources running
+on any node it refuses to run any tests.
+
+== Multi-state and clone resources ==
+
+Apart from groups, `rsctest` can also handle the other two
+special kinds of resources. Let's take a look at one `drbd`-based
+configuration:
+
+----
+crm(live)configure# show ms_drbd_nfs drbd0-vg
+primitive drbd0-vg ocf:heartbeat:LVM \
+ params volgrpname="drbd0-vg"
+primitive p_drbd_nfs ocf:linbit:drbd \
+ meta target-role="Stopped" \
+ params drbd_resource="nfs" \
+ op monitor interval="15" role="Master" \
+ op monitor interval="30" role="Slave" \
+ op start interval="0" timeout="300" \
+ op stop interval="0" timeout="120"
+ms ms_drbd_nfs p_drbd_nfs \
+ meta notify="true" clone-max="2"
+crm(live)configure#
+----
+
+The `nfs` drbd resource contains a volume group `drbd0-vg`.
+
+----
+crm(live)configure# rsctest ms_drbd_nfs drbd0-vg
+Probing resources ..
+testing on xen-f: p_drbd_nfs drbd0-vg
+testing on xen-g: p_drbd_nfs drbd0-vg
+crm(live)configure#
+----
+
+For the multi-state (master-slave) resources, the involved
+resource motions are somewhat more complex: the resource is first
+started on both nodes and then promoted on the node where the
+next resource is to be tested (in this case the volume group).
+Then it gets demoted to slave on that node and promoted to master
+on the other node, so that the dependent resources can be tested
+there too.
+
+Note that even though we asked for `ms_drbd_nfs` to be tested,
+there is `p_drbd_nfs` in the output which is the primitive
+encapsulated in the master-slave resource. You can specify either
+one.
+
+== Stonith resources ==
+
+Stonith resources are also special and need separate
+treatment. What is tested is just the device status; actually
+fencing nodes was deemed too drastic. Please use `node fence` to
+test the effectiveness of the fencing device, as shown below. It
+also does not matter whether the stonith resource is "running" on
+any node: being started is just something that happens virtually
+in the `stonithd` process.
+
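+To test what the fencing device actually does to a node, use the
+`node fence` command instead, for example (with one of the nodes
+from the examples above):
+
+----
+crm node fence xen-g
+----
+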
+== Summary ==
+
+- use `rsctest` to make sure that the resources can be started
+ correctly on all nodes
+
+- `rsctest` protects resources by making sure beforehand that
+ none of them is currently running on any of the cluster nodes
+
+- `rsctest` understands groups, master-slave (multi-state), and
+ clone resources, but nothing else of the configuration
+ (constraints or any other placement/order cluster configuration
+ elements)
+
+- it is up to the user to test resources only on nodes which are
+ really supposed to run them and in a proper order (if that
+ order is expressed via constraints)
+
+- `rsctest` cannot protect resources if they are running on
+ nodes which are not present in the cluster or from bad RA
+ implementations (but neither would a cluster resource manager)
+
+- `rsctest` was designed as a debugging and configuration aid, and is
+ not intended to provide full Resource Agent test coverage.
+
+== `crmsh` help and online resources (_sic!_) ==
+
+- link:crm.8.html#topics_Testing[`crm help Testing`]
+
+- link:crm.8.html#cmdhelp_configure_rsctest[`crm configure help
+rsctest`]
diff --git a/doc/website-v1/scripts.adoc b/doc/website-v1/scripts.adoc
new file mode 100644
index 0000000..7742729
--- /dev/null
+++ b/doc/website-v1/scripts.adoc
@@ -0,0 +1,660 @@
+= Cluster Scripts =
+:source-highlighter: pygments
+
+.Version information
+NOTE: This section applies to `crmsh 2.2+` only.
+
+== Introduction ==
+
+A big part of the configuration and management of a cluster is
+collecting information about all cluster nodes and deploying changes
+to those nodes. Often, just performing the same procedure on all
+nodes runs into problems, due to subtle differences in each node's
+configuration.
+
+For example, when configuring a cluster for the first time, the
+software needs to be installed and configured on all nodes before the
+cluster software can be launched and configured using `crmsh`. This
+process is cumbersome and error-prone, and the goal of scripts is to
+make it easier.
+
+Another important function of scripts is collecting information and
+reporting potential issues with the cluster. For example, software
+versions may differ between nodes, causing byzantine errors or random
+failure. `crmsh` comes packaged with a `health` script which will
+detect and warn about many of these types of problems.
+
+There are many tools for managing a collection of nodes, and scripts
+are not intended to replace these tools. Rather, they provide an
+integrated way to perform tasks across the cluster that would
+otherwise be tedious, repetitive and error-prone. The scripts
+functionality in the crm shell is mainly inspired by Ansible, a
+light-weight and efficient configuration management tool.
+
+Scripts are implemented using the Python `parallax` package, which
+provides a thin wrapper on top of SSH. This allows the scripts to
+function through the usual SSH channels used for system maintenance,
+requiring no additional software to be installed or maintained.
+
+For many scripts that only configure cluster resources or only perform
+changes on the local machine, the use of SSH is not necessary. These
+scripts can be used even if there is no way for `crmsh` to reach the
+other nodes other than through the cluster configuration.
+
+NOTE: The scripts functionality in `crmsh` has been greatly expanded
+and improved in `crmsh` 2.2. Many new scripts have been added, and in
+addition the scripts are now used as the backend for the wizards
+functionality in HAWK, the HA web interface. For more information, see
+https://github.com/ClusterLabs/hawk.
+
+== Usage ==
+
+Scripts are available through the `cluster` sub-level in the crm
+shell. Some scripts have custom commands linked to them for
+convenience, such as the `init`, `add` and `remove` commands available
+in the `cluster` sublevel, for creating new clusters, introducing new
+nodes into the cluster and for removing nodes from a running cluster.
+
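+For example, bootstrapping a cluster on the local node is done with
+the `init` custom command:
+
+.........
+# crm cluster init
+.........
+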
+Other scripts can be accessed through the `script` sub-level.
+
+=== Common Parameters ===
+
+Which parameters a script accepts varies from script to
+script. However, there is a set of parameters that are common to all
+scripts. These parameters can be passed to any script.
+
+`nodes`::
+ List of nodes to execute the script for
+`dry_run`::
+ If set, simulate execution only
+ (default: no)
+`action`::
+ If set, only execute a single action (index, as returned by verify)
+`statefile`::
+ When single-stepping, the state is saved in the given file
+`user`::
+ Run script as the given user
+`sudo`::
+ If set, crm will prompt for a sudo password and use sudo when appropriate
+ (default: no)
+`port`::
+ Port to connect on
+`timeout`::
+ Execution timeout in seconds
+ (default: 600)
+
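+As a sketch, an invocation of the `health` script that limits
+execution to two nodes and raises the timeout might look like this
+(the node names are placeholders):
+
+.........
+# crm script
+run health nodes=node1,node2 timeout=900
+.........
+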
+=== List available scripts ===
+
+To list the available scripts, use the following command:
+
+.........
+# crm script
+list
+.........
+
+The available scripts are listed along with a short
+description. Optionally, the arguments +all+ or +names+ can be
+used. Without the +all+ flag, some scripts that are used by `crmsh` to
+implement certain commands are hidden from view. With the +names+
+flag, only a plain list of script names is printed.
+
+=== Script description ===
+
+To get more details about a script, run the `show` command. For
+example, to get more information about what the `virtual-ip` script does
+and what parameters it accepts, use the following command:
+
+.........
+# crm script
+show virtual-ip
+.........
+
+`show` will print a longer description of the script, along with a
+list of parameters divided into _steps_. Each script is divided into a
+series of steps which are performed in order. Some steps may not
+accept any parameters, but for those that do, each parameter is listed
+with a description, a note saying whether it is optional or required,
+and, if optional, its default value.
+
+By default, only a basic subset of the available parameters is printed
+in order to make the scripts easier to use. By passing `all` to the
+`show` command, the advanced parameters are also shown, in addition to
+the common parameters described above.
+
+=== Verifying parameters ===
+
+Since a script potentially performs a series of actions and may fail
+for various reasons at any point, it is advisable to review the
+actions that a script will perform before actually running it. To do
+this, the `verify` command can be used.
+
+Pass the parameters that you would pass to `run`, and `verify` will
+check that the parameter values are OK, as well as print the sequence
+of steps that would be performed with those particular parameter
+values.
+
+The following is an example showing how to verify the creation of a
+Virtual IP resource, using the `virtual-ip` script:
+
+..........
+# crm script
+verify virtual-ip id=my-virtual-ip ip=192.168.0.10
+..........
+
+`crmsh` will print something similar to the following output:
+
+...........
+1. Configure cluster resources
+
+ primitive my-virtual-ip ocf:heartbeat:IPaddr2
+ ip="192.168.0.10"
+ op start timeout="20" op stop timeout="20"
+ op monitor interval="10" timeout="20"
+...........
+
+In this particular case, there is only a single step, and that step
+configures a primitive resource. Other scripts may configure multiple
+resources and constraints, or may perform multiple steps in sequence.
+
+=== Running a script ===
+
+To run a script, all required parameters and any optional parameters
+that should have values other than the default should be provided as
+`key=value` pairs on the command line.
+
+The following example shows how to create a Virtual IP resource using
+the `virtual-ip` script:
+
+........
+# crm script
+run virtual-ip id=my-virtual-ip ip=192.168.0.10
+........
+
+==== Single-stepping a script ====
+
+It is possible to run a script action-by-action, with manual intervention
+between actions. First of all, list the actions to perform given a
+certain set of parameter values:
+
+........
+crm script verify health
+........
+
+To execute a single action, two things need to be provided:
+
+1. The index of the action to execute (printed by `verify`)
+2. A file in which `crmsh` stores the state of execution.
+
+Note that it is entirely possible to run actions out of order;
+however, this is unlikely to work in practice, since actions often
+rely on the outcome of previous actions.
+
+The following command will execute the first action of the `health`
+script and store the output in a temporary file named `health.json`:
+
+........
+crm script run health action=1 statefile='health.json'
+........
+
+The statefile contains the script parameters and the output of
+previous steps, encoded as `json` data.
+
+To continue executing the next action in sequence, enter the next
+action index:
+
+........
+crm script run health action=2 statefile='health.json'
+........
+
+Note that the `dry_run` flag, which can be used to do partial
+execution of scripts, is not taken into consideration when
+single-stepping through a script.
+
+== Creating a script ==
+
+This section will describe how to create a new script, where to put
+the script to allow `crmsh` to find it, and how to test that the
+script works as intended.
+
+=== How scripts work, in detail ===
+
+NOTE: The implementation of cluster scripts was revised between
+`crmsh` 2.0 and `crmsh` 2.2. This section describes the revised
+cluster script format. The old format is still accepted by `crmsh`.
+
+A cluster script consists of four main sections:
+
+. The name and description of the script.
+. Any other scripts or agents included by this script, and any parameter value overrides to those provided by the included script.
+. A set of parameters accepted by the script itself, in addition to those accepted by any scripts or agents included in the script.
+. A sequence of actions which the script will perform.
+
+When the script runs, the actions defined in `main.yml` as described
+below are executed one at a time. Each action prescribes a
+modification that is applied to the cluster. Some actions work by
+calling out to scripts on each of the cluster nodes, and others apply
+only on the local node from which the script was executed.
+
+=== Actions ===
+
+Scripts perform actions that are classified into a few basic
+types. Each action is performed by calling out to a shell script,
+but the arguments and location of that script varies depending on the
+type.
+
+Here are the types of script actions that can be performed:
+
+cib::
+ * Applies a new CIB configuration to the cluster
+
+install::
+ * Ensures that the given list of packages is installed on all
+ cluster nodes using the system package manager.
+
+service::
+ * Manages system services using the system init tools. The argument
+ should be a space-separated list of <service>:<state> pairs.
+
+call::
+ * Run a shell command as specified in the action, either on the
+ local node or on all nodes.
+
+copy::
+ * Installs a file on the cluster nodes, optionally generated
+ from a configuration template.
+
+crm::
+ * Runs the given command using the `crm` shell. This can be used to
+ start and stop resources, for example.
+
+collect::
+ * Runs on all cluster nodes
+ * Gathers information about the nodes, both general information and
+ information specific to the script.
+
+validate::
+ * Runs on the local node
+ * Validate parameter values and node state based on collected
+ information. Can modify default values and report issues that
+ would prevent the script from applying successfully.
+
+apply::
+ * Runs on any or all cluster nodes
+ * Applies changes, returning information about the applied changes
+ to the local node.
+
+apply_local::
+ * Runs on the local node
+ * Applies changes to the cluster, where an action taken on a single
+ node affects the entire cluster. This includes updating the CIB in
+ Pacemaker, and also reloading the configuration for Corosync.
+
+report::
+ * Runs on the local node
+ * This is similar to the _apply_local_ action, with the difference
+ that the output of a Report action is not interpreted as JSON data
+ to be passed to the next action. Instead, the output is printed to
+ the screen.
+
+==== When expressions ====
+
+Actions can be made conditional on the value of script parameters using
+the +when:+ expression. This expression has two basic forms.
+
+The first form is simply the name of a script parameter. For
+example, given a boolean script parameter named +install+, an action
+can be made conditional on that parameter being true using the syntax
++when: install+.
+
+The second form is a more complex expression. All parameters are
+interpreted as either a string value or None if no value was provided.
+These can be compared to string literals using Python-style
+comparators. For example, an action can be conditional on the string
+parameter +mode+ having the value +"advanced"+ using the following
+syntax: +when: mode == "advanced"+.
+
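+For illustration, here is a minimal sketch combining both forms. It
+assumes a script that defines a boolean parameter +install+ and a
+string parameter +mode+; the package name and command are
+illustrative only:
+
+[source,yaml]
+----
+actions:
+  - install:
+      - apache2
+    shortdesc: Install the web server package
+    when: install
+  - call: echo "advanced mode enabled"
+    shortdesc: Only runs when mode is set to "advanced"
+    when: mode == "advanced"
+----
+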
+=== Basic structure ===
+
+The crm shell looks for scripts in two primary locations: Included
+scripts are installed in the system-wide shared folder, usually
+`/usr/share/crmsh/scripts/`. Local and custom scripts are loaded from
+the user-local XDG_CONFIG folder, usually found at
+`~/.local/crm/scripts/`. These locations may differ depending on how
+the crm shell was installed and which system is used, but these are
+the locations used on most distributions.
+
+To create a new script, make a new folder in the user-local scripts
+folder and give it a unique name. In this example, we will call our
+new script `check-uptime`.
+
+........
+mkdir -p ~/.local/crm/scripts/check-uptime
+........
+
+In this directory, create a file called `main.yml`. This is a YAML
+document which describes the script, which parameters it requires, and
+what actions it will perform.
+
+YAML is a human-readable markup language which is designed to be easy
+to read and modify, while at the same time being compatible with
+JSON. To learn more, see http://yaml.org/[yaml.org].
+
+Here is an example `main.yml` file which wraps the resource agent
+`ocf:heartbeat:IPaddr2`.
+
+[source,yaml]
+----
+# The version must be exactly 2.2, and must always be
+# specified in the script. If the version is missing or
+# is less than 2.2, the script is assumed to be a legacy
+# script (specified in the format used before crmsh 2.2).
+version: 2.2
+shortdesc: Virtual IP
+category: Basic
+include:
+  - agent: ocf:heartbeat:IPaddr2
+    name: virtual-ip
+    parameters:
+      - name: id
+        type: resource
+        required: true
+      - name: ip
+        type: ip_address
+        required: true
+      - name: cidr_netmask
+        type: integer
+        required: false
+      - name: broadcast
+        type: ip_address
+        required: false
+    ops: |
+      op start timeout="20" op stop timeout="20"
+      op monitor interval="10" timeout="20"
+actions:
+  - include: virtual-ip
+----
+
+For a bigger example, here is the `apache` script, which includes
+multiple optional steps, optionally installs packages, defines
+multiple cluster resources, and potentially calls bash commands
+on each of the cluster nodes.
+
+[source,yaml]
+----
+# Copyright (C) 2009 Dejan Muhamedagic
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: Server
+shortdesc: Apache Webserver
+longdesc: |
+  Configure a resource group containing a virtual IP address and
+  an instance of the Apache web server.
+
+  You can optionally configure a Filesystem resource which will be
+  mounted before the web server is started.
+
+  You can also optionally configure a database resource which will
+  be started before the web server but after mounting the optional
+  filesystem.
+include:
+  - agent: ocf:heartbeat:apache
+    name: apache
+    longdesc: |
+      The Apache configuration file specified here must be available via the
+      same path on all cluster nodes, and Apache must be configured with
+      mod_status enabled. If in doubt, try running Apache manually via
+      its init script first, and ensure http://localhost:80/server-status is
+      accessible.
+    ops: |
+      op start timeout="40"
+      op stop timeout="60"
+      op monitor interval="10" timeout="20"
+  - script: virtual-ip
+    shortdesc: The IP address configured here will start before the Apache instance.
+    parameters:
+      - name: id
+        value: "{{id}}-vip"
+  - script: filesystem
+    shortdesc: Optional filesystem mounted before the web server is started.
+    required: false
+  - script: database
+    shortdesc: Optional database started before the web server is started.
+    required: false
+parameters:
+  - name: install
+    type: boolean
+    shortdesc: Install and configure apache
+    value: false
+actions:
+  - install:
+      - apache2
+    shortdesc: Install the apache package
+    when: install
+  - service:
+      - apache: disable
+    shortdesc: Let cluster manage apache
+    when: install
+  - call: a2enmod status; true
+    shortdesc: Enable status module
+    when: install
+  - include: filesystem
+  - include: database
+  - include: virtual-ip
+  - include: apache
+  - cib: |
+      group g-{{id}}
+        {{filesystem:id}}
+        {{database:id}}
+        {{virtual-ip:id}}
+        {{id}}
+----
+
+The language for referring to parameter values in `cib` actions is
+described below.
+
+=== Command arguments ===
+
+The actions that accept a command as argument are not limited to
+commands written in Python. They can be plain bash scripts or any
+other executable script as long as the nodes have the necessary
+dependencies installed. However, see below why implementing scripts in
+Python is easier.
+
+Actions report their progress either by returning JSON on standard
+output, or by returning a non-zero return value and printing an error
+message to standard error.
+
+Any JSON returned by an action will be available to the following
+steps in the script. When the script executes, it does so in a
+temporary folder created for that purpose. In that folder is a file
+named `script.input`, containing a JSON array with the output produced
+by previous steps.
+
+The first element in the array (the zeroth element, to be precise) is
+a dict containing the parameter values.
+
+The following elements are dicts with the hostname of each node as key
+and the output of the action generated by that node as value.
+
+In most cases, only local actions (`validate` and `apply_local`) will
+use the information in previous steps, but scripts are not limited in
+what they can do.
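+
+As an illustration, for the uptime-checking script developed below,
+the `script.input` file seen by the report step might look something
+like this (node names and values are made up):
+
+[source,json]
+----
+[
+    {"show_all": "no"},
+    {"ha-one": "161054.04", "ha-three": "159950.38"}
+]
+----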
+
+With this knowledge, we can implement `fetch.py` and `report.py`.
+
+`fetch.py`:
+
+[source,python]
+----
+#!/usr/bin/python3
+import crm_script as crm
+try:
+    uptime = open('/proc/uptime').read().split()[0]
+    crm.exit_ok(uptime)
+except Exception as e:
+    crm.exit_fail("Couldn't open /proc/uptime: %s" % (e))
+----
+
+`report.py`:
+
+[source,python]
+----
+#!/usr/bin/python3
+import crm_script as crm
+show_all = crm.is_true(crm.param('show_all'))
+uptimes = list(crm.output(1).items())
+max_uptime = '', 0
+for host, uptime in uptimes:
+    if float(uptime) > max_uptime[1]:
+        max_uptime = host, float(uptime)
+if show_all:
+    print("Uptimes: %s" % (', '.join("%s: %s" % v for v in uptimes)))
+print("Longest uptime is %s seconds on host %s" % (max_uptime[1], max_uptime[0]))
+----
+
+See below for more details on the helper library `crm_script`.
+
+Save the scripts as executable files in the same directory as the
+`main.yml` file.
+
+Before running the script, it is possible to verify that the files are
+in a valid format and in the right location. Run the following
+command:
+
+........
+crm script verify check-uptime
+........
+
+If the verification is successful, try executing the script with the
+following command:
+
+........
+crm script run check-uptime
+........
+
+Example output:
+
+[source,bash]
+----
+# crm script run check-uptime
+INFO: Check uptime of nodes
+INFO: Nodes: ha-three, ha-one
+OK: Fetch uptimes
+OK: Report uptime
+Longest uptime is 161054.04 seconds on host ha-one
+----
+
+To see if the `show_all` parameter works as intended, run the
+following:
+
+........
+crm script run check-uptime show_all=yes
+........
+
+Example output:
+
+[source,bash]
+----
+# crm script run check-uptime show_all=yes
+INFO: Check uptime of nodes
+INFO: Nodes: ha-three, ha-one
+OK: Fetch uptimes
+OK: Report uptime
+Uptimes: ha-one: 161069.83, ha-three: 159950.38
+Longest uptime is 161069.83 seconds on host ha-one
+----
+
+=== Remote permissions ===
+
+Some scripts may require super-user access to remote or local
+nodes. It is recommended that this is handled through SSH certificates
+and agents, to facilitate password-less access to nodes.
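+
+As a sketch with OpenSSH (the node name is illustrative), generating
+a key pair and copying it to each node could look like this:
+
+........
+ssh-keygen -t ed25519
+ssh-copy-id root@node1
+........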
+
+=== Running scripts without a cluster ===
+
+All cluster scripts can optionally take a `nodes` argument, which
+determines the nodes that the script will run on. This node list is
+not limited to nodes already in the cluster. It is even possible to
+execute cluster scripts before a cluster is set up, such as the
+`health` and `init` scripts used by the `cluster` sub-level.
+
+........
+crm script run health nodes=example1,example2
+........
+
+The list of nodes can be comma- or space-separated, but if the list
+contains spaces, the whole argument will have to be quoted:
+
+........
+crm script run health nodes="example1 example2"
+........
+
+=== Running in validate mode ===
+
+It may be desirable to do a dry-run of a script, to see if any
+problems are present that would make the script fail before trying to
+apply it. To do this, add the argument `dry_run=yes` to the invocation:
+
+.........
+crm script run health dry_run=yes
+.........
+
+The script execution will stop at the first `apply` action. Note that
+non-modifying steps that happen after the first `apply` action will
+not be performed in a dry run.
+
+=== Helper library ===
+
+When the script data is copied to each node, a small helper library is
+also passed along with the script. This library can be found in
+`utils/crm_script.py` in the source repository. This library helps
+with producing output in the correct format, parsing the
+`script.input` data provided to scripts, and more.
+
+.`crm_script` API
+`host()`::
+ Returns hostname of current node
+`get_input()`::
+ Returns the input data list. The first element in the list
+ is a dict of the script parameters. The rest are the output
+ from previous steps.
+`parameters()`::
+ Returns the script parameters as a dict.
+`param(name)`::
+ Returns the value of the named script parameter.
+`output(step_idx)`::
+ Returns the output of the given step, with the first step being step 1.
+`exit_ok(data)`::
+ Exits the step returning `data` as output.
+`exit_fail(msg)`::
+ Exits the step returning `msg` as error message.
+`is_true(value)`::
+ Converts a truth value from string to boolean.
+`call(cmd, shell=False)`::
+ Perform a system call. Returns `(rc, stdout, stderr)`.
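+
+As a minimal sketch, a validation step could use this API as follows
+(the +port+ parameter is illustrative):
+
+[source,python]
+----
+#!/usr/bin/python3
+import crm_script as crm
+
+# Validate a script parameter before any apply step runs
+port = crm.param('port')
+if port is None or not port.isdigit():
+    crm.exit_fail("Invalid port: %s" % (port))
+crm.exit_ok({'port': port})
+----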
+
+=== The handles language ===
+
+CIB configurations and commands can refer to the value of parameters
+in the text of the action. This is done using a custom language,
+similar to handlebars.
+
+The language accepts the following constructions:
+
+............
+{{name}} = Inserts the value of the parameter <name>
+{{script:name}} = Inserts the value of the parameter <name> from the
+ included script named <script>.
+{{#name}} ... {{/name}} = Inserts the text between the mustaches when
+ name is truthy.
+{{^name}} ... {{/name}} = Inserts the text between the mustaches when
+ name is falsy.
+............
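+
+For example, given a parameter +id+ with the value +www+, and an
+included script +virtual-ip+ whose +id+ parameter is +www-vip+, the
+action text +group g-{{id}} {{virtual-ip:id}} {{id}}+ would expand
+to +group g-www www-vip www+.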
+
diff --git a/doc/website-v1/start-guide.adoc b/doc/website-v1/start-guide.adoc
new file mode 100644
index 0000000..7ad6a82
--- /dev/null
+++ b/doc/website-v1/start-guide.adoc
@@ -0,0 +1,208 @@
+= Getting Started
+
+So, you've successfully installed `crmsh` on one or more machines, and
+now you want to configure a basic cluster. This guide is intended to
+provide step-by-step instructions for configuring Pacemaker
+with a single resource capable of failing over between a pair of
+nodes, and then builds on that base to cover some more advanced topics
+of cluster management.
+
+****
+Haven't installed yet? Please follow the
+link:/installation[installation instructions]
+before continuing this guide. Only `crmsh` and
+its dependencies need to be installed before
+following this guide.
+****
+
+Before continuing, make sure that this command executes successfully
+on all nodes, and returns a version number that is `3.0` or higher:
+
+........
+crm --version
+........
+
+****
+In crmsh 3, the cluster init commands were replaced by the SLE HA
+bootstrap scripts. These rely on `csync2` for configuration file
+management, so make sure that you have the `csync2` command
+installed before proceeding. This requirement may be removed in
+the future.
+****
+
+.Example cluster
+**************************
+
+These are the machines used as an example in this guide. Please
+replace the references to these names and IP addresses with the values
+appropriate for your cluster:
+
+
+[options="header,footer"]
+|=======================
+|Name |IP
+|alice |10.0.0.2
+|bob |10.0.0.3
+|=======================
+**************************
+
+
+== The cluster stack
+
+The composition of the GNU/Linux cluster stack has changed somewhat
+over the years. The stack described here is the currently most common
+variant, but there are other ways of configuring these tools.
+
+Simply put, a High Availability cluster is a set of machines (commonly
+referred to as *nodes*) with redundant capacity, such that if one or
+more of these machines experience failure of any kind, the other nodes
+in the cluster can take over the responsibilities previously handled
+by the failed node.
+
+The cluster stack is a set of programs running on all of these nodes,
+communicating with each other over the network to monitor each other
+and deciding where, when and how resources are stopped, started or
+reconfigured.
+
+The main component of the stack is *Pacemaker*, the software
+responsible for managing cluster resources, allocating them to cluster
+nodes according to the rules specified in the *CIB*.
+
+The CIB is an XML document maintained by Pacemaker, which describes
+all cluster resources, their configuration and the constraints that
+decide where and how they are managed. This document is not edited
+directly, and with the help of `crmsh` it is possible to avoid
+exposure to the underlying XML entirely.
+
+Beneath Pacemaker in the stack sits *Corosync*, a cluster
+communication system. Corosync provides the communication capabilities
+and cluster membership functionality used by Pacemaker. Corosync is
+configured through the file `/etc/corosync/corosync.conf`. `crmsh`
+provides tools for configuring Corosync, similar to those it
+provides for Pacemaker.
+
+Aside from these two components, the stack also consists of a
+collection of *Resource Agents*. These are basically scripts that wrap
+software that the cluster needs to manage, providing a unified
+interface to configuration, supervision and management of the
+software. For example, there are agents that handle virtual IP
+resources, web servers, databases and filesystems.
+
+`crmsh` is a command line tool which interfaces against all of these
+components, providing a unified interface for configuration and
+management of the whole cluster stack.
+
+== SSH
+
+`crmsh` runs as a command line tool on any one of the cluster
+nodes. In order for it to control all cluster nodes, it needs to be
+able to execute commands remotely. `crmsh` does this by invoking
+`ssh`.
+
+Configure `/etc/hosts` on each of the nodes so that the names of the
+other nodes map to the IP addresses of those nodes. For example in a
+cluster consisting of `alice` and `bob`, executing `ping bob` when
+logged in as root on `alice` should successfully locate `bob` on the
+network. Given the IP addresses of `alice` and `bob` above, the
+following should be entered into `/etc/hosts` on both nodes:
+
+........
+10.0.0.2 alice
+10.0.0.3 bob
+........
+
+== Install and configure
+
+To configure the basic cluster, we use the `cluster init` command
+provided by `crmsh`. This command has quite a few options for
+setting up the cluster, but we will use a fairly basic configuration.
+
+........
+crm cluster init --name demo-cluster --nodes "alice bob"
+........
+
+The initialization tool will now ask a series of questions about the
+configuration, and then proceed to configure and start the cluster
+on both nodes.
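+
+To answer "yes" to all prompts non-interactively, the `-y` option can
+be added:
+
+........
+crm cluster init --name demo-cluster --nodes "alice bob" -y
+........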
+
+== Check cluster status
+
+To see if Pacemaker is running, what nodes are part of the cluster and
+what resources are active, use the `status` command:
+
+.........
+crm status
+.........
+
+If this command fails or times out, there is some problem with
+Pacemaker or Corosync on the local machine. Perhaps some dependency is
+missing, a firewall is blocking cluster communication or some other
+unrelated problem has occurred. If this is the case, the `cluster
+health` command may be of use.
+
+== Cluster health check
+
+To check the health status of the machines in the cluster, use the
+following command:
+
+........
+crm cluster health
+........
+
+This command will perform multiple diagnostics on all nodes in the
+cluster, and return information about low disk space, communication
+issues or problems with mismatching software versions between nodes,
+for example.
+
+If no cluster has been configured or there is some fundamental problem
+with cluster communications, `crmsh` may be unable to figure out what
+nodes are part of the cluster. If this is the case, the list of nodes
+can be provided to the health command directly:
+
+........
+crm cluster health nodes=alice,bob
+........
+
+== Adding a resource
+
+To test the cluster and make sure it is working properly, we can
+configure a Dummy resource. The Dummy resource agent is a simple
+resource that doesn't actually manage any software. It exposes a
+single numerical parameter called `state` which can be used to test
+the basic functionality of the cluster before introducing the
+complexities of actual resources.
+
+To configure a Dummy resource, run the following command:
+
+........
+crm configure primitive p0 Dummy
+........
+
+This creates a new resource, gives it the name `p0` and sets the
+agent for the resource to be the `Dummy` agent.
+
+`crm status` should now show the `p0` resource as started on one
+of the cluster nodes:
+
+........
+# crm status
+Last updated: Wed Jul 2 21:49:26 2014
+Last change: Wed Jul 2 21:49:19 2014
+Stack: corosync
+Current DC: alice (2) - partition with quorum
+Version: 1.1.11-c3f1a7f
+2 Nodes configured
+1 Resources configured
+
+
+Online: [ alice bob ]
+
+ p0 (ocf::heartbeat:Dummy): Started alice
+........
+
+The resource can be stopped or started using the `resource start` and
+`resource stop` commands:
+
+........
+crm resource stop p0
+crm resource start p0
+........
diff --git a/etc/crm.conf.in b/etc/crm.conf.in
new file mode 100644
index 0000000..39f8d5c
--- /dev/null
+++ b/etc/crm.conf.in
@@ -0,0 +1,120 @@
+; crmsh configuration file
+; To override per user, create a file ~/.config/crm/crm.conf
+;
+; [core]
+; editor = $EDITOR
+; pager = $PAGER
+; user =
+; skill_level = expert
+; sort_elements = yes
+; check_frequency = always
+; check_mode = strict
+; wait = no
+; add_quotes = yes
+; manage_children = ask
+; force = no
+; debug = no
+; ptest = ptest, crm_simulate
+; dotty = dotty
+; dot = dot
+; ignore_missing_metadata = no
+; report_tool_options =
+; lock_timeout = 120
+
+; Set OCF_1_1_SUPPORT to yes to fully enable the OCF 1.1 feature once a corresponding CIB is detected.
+; OCF_1_1_SUPPORT = yes
+
+; The obscure_pattern option is the persistent configuration of the CLI.
+; For example, for higher security concerns, obscure_pattern = passw* | ip
+; which makes `crm configure show` equivalent to
+;
+; node-1:~ # crm configure show obscure:passw* obscure:ip
+; node 1084783297: node1
+; primitive fence_device stonith:fence_ilo5 \
+; params password="******"
+; primitive ip IPaddr2 \
+; params ip="******"
+;
+; The default pattern is passw*
+; If you don't want to obscure anything, set the value to blank.
+;
+; obscure_pattern = passw*
+
+[path]
+; sharedir = <detected>
+; cache = <detected>
+; crm_config = <detected>
+; crm_daemon_dir = <detected>
+crm_daemon_user = @CRM_DAEMON_USER@
+ocf_root = @OCF_ROOT_DIR@
+; crm_dtd_dir = <detected>
+; pe_state_dir = <detected>
+; heartbeat_dir = <detected>
+; hb_delnode = /usr/share/heartbeat/hb_delnode
+; nagios_plugins = /usr/lib/nagios/plugins
+
+; [color]
+; style = color
+; error = red bold
+; ok = green bold
+; warn = yellow bold
+; info = cyan
+; help_keyword = blue bold underline
+; help_header = normal bold
+; help_topic = yellow bold
+; help_block = cyan
+; keyword = yellow
+; identifier = normal
+; attr_name = cyan
+; attr_value = red
+; resource_reference = green
+; id_reference = green
+; score = magenta
+; ticket = magenta
+
+; [report]
+; from_time = -12H
+; compress = yes
+; speed_up = no
+; collect_extra_logs = /var/log/messages /var/log/pacemaker.log
+; remove_exist_dest = no
+; single_node = no
+;
+; sanitize_rule = sanitize_pattern[:options] ...
+;
+; This defines how sensitive data generated by crm report is hidden.
+;
+; 'sanitize_pattern' is a RegEx string which is matched against the
+; 'name' field of CIB params. The sanitize process will hide the 'value'
+; of the matched 'name:value' pairs in the CIB, PE inputs and
+; pacemaker.log.
+;
+; 'options' is predefined, and 'raw' is the only option defined
+; currently. With the ':raw' option, the sanitize process will fetch
+; the 'value' results out of CIB 'name:value' pairs, and use them to
+; hide all clear-text occurrences in all files crm report collected.
+;
+; Example 1:
+; sanitize_rule = passw.*
+;
+; This is the default. It will hide password name:value pairs.
+; The result of crm report could look like
+; name="password", value=******
+; @name=password @value=******
+; passwd=******
+;
+;
+; Example 2:
+; sanitize_rule = ip.*:raw
+;
+; This will only hide IP addresses. For example, the sanitize process
+; will fetch ip=10.10.10.10 and replace all clear-text occurrences of
+; "10.10.10.10".
+;
+;
+; Example 3:
+; sanitize_rule = passw.*|ip.*:raw
+;
+; This is useful for higher security concerns.
+; The sanitize process will hide all password "name:value" pairs as in
+; example 1, and all clear-text IP addresses as in example 2 above.
+;
+; sanitize_rule = passw.*
diff --git a/etc/profiles.yml b/etc/profiles.yml
new file mode 100644
index 0000000..eca34d2
--- /dev/null
+++ b/etc/profiles.yml
@@ -0,0 +1,29 @@
+# The valid profile names are:
+# "microsoft-azure", "google-cloud-platform", "amazon-web-services", "s390", "default"
+#
+# "default" profile is loaded in the beginning.
+#
+# Those specific profile will override the corresponding values in "default"
+# profile if the specific environment is detected.
+#
+# Users could customize the "default" profile for their needs, for example,
+# those on-premise environments which is not defined yet.
+#
+# Profiles are only loaded on bootstrap init node.
+#
+# More details please see man corosync.conf, man sbd
+
+default:
+ corosync.totem.crypto_hash: sha1
+ corosync.totem.crypto_cipher: aes256
+ corosync.totem.token: 5000
+ corosync.totem.join: 60
+ corosync.totem.max_messages: 20
+ corosync.totem.token_retransmits_before_loss_const: 10
+ # sbd.msgwait is set to sbd.watchdog_timeout*2 by crmsh
+ # or, you can define your own value in profiles.yml
+ sbd.watchdog_timeout: 15
+
+microsoft-azure:
+ corosync.totem.token: 30000
+ sbd.watchdog_timeout: 60
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000..8289f18
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,4 @@
+[pytest]
+python_files = test_*.py
+testpaths = crmsh
+norecursedirs =
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..d7a8e39
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,3 @@
+lxml
+PyYAML
+python-dateutil
diff --git a/scripts/apache/main.yml b/scripts/apache/main.yml
new file mode 100644
index 0000000..9af548d
--- /dev/null
+++ b/scripts/apache/main.yml
@@ -0,0 +1,69 @@
+# Copyright (C) 2009 Dejan Muhamedagic
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: Server
+shortdesc: Apache Webserver
+longdesc: |
+ Configure a resource group containing a virtual IP address and
+ an instance of the Apache web server.
+
+ You can optionally configure a file system resource which will be
+ mounted before the web server is started.
+
+ You can also optionally configure a database resource which will
+ be started before the web server but after mounting the optional
+ file system.
+include:
+ - agent: ocf:heartbeat:apache
+ name: apache
+ longdesc: |
+ The Apache configuration file specified here must be available via the
+ same path on all cluster nodes, and Apache must be configured with
+ mod_status enabled. If in doubt, try running Apache manually via
+ its init script first, and ensure http://localhost:80/server-status is
+ accessible.
+ ops: |
+ op start timeout="40"
+ op stop timeout="60"
+ op monitor interval="10" timeout="20"
+ - script: virtual-ip
+ shortdesc: The IP address configured here will start before the Apache instance.
+ parameters:
+ - name: id
+ value: "{{id}}-vip"
+ - script: filesystem
+ shortdesc: Optional file system mounted before the web server is started.
+ required: false
+ - script: database
+ shortdesc: Optional database started before the web server is started.
+ required: false
+parameters:
+ - name: install
+ type: boolean
+ shortdesc: Install and configure apache
+ value: false
+actions:
+ - install:
+ - apache2
+ shortdesc: Install the apache package
+ when: install
+ - service:
+ - apache: disable
+ shortdesc: Let cluster manage apache
+ when: install
+ - call: a2enmod status; true
+ shortdesc: Enable status module
+ when: install
+ sudo: true
+ - include: filesystem
+ - include: database
+ - include: virtual-ip
+ - include: apache
+ - cib: |
+ group g-{{id}}
+ {{filesystem:id}}
+ {{database:id}}
+ {{virtual-ip:id}}
+ {{id}}
diff --git a/scripts/check-uptime/fetch.py b/scripts/check-uptime/fetch.py
new file mode 100755
index 0000000..c1cceff
--- /dev/null
+++ b/scripts/check-uptime/fetch.py
@@ -0,0 +1,7 @@
+#!/usr/bin/python3
+import crm_script
+try:
+    uptime = open('/proc/uptime').read().split()[0]
+    crm_script.exit_ok(uptime)
+except Exception as e:
+    crm_script.exit_fail("Couldn't open /proc/uptime: %s" % (e))
diff --git a/scripts/check-uptime/main.yml b/scripts/check-uptime/main.yml
new file mode 100644
index 0000000..d37f712
--- /dev/null
+++ b/scripts/check-uptime/main.yml
@@ -0,0 +1,19 @@
+version: 2.2
+category: Script
+shortdesc: Check uptime of nodes
+longdesc: >
+ Fetches the uptime of all nodes and reports which
+ node has been up the longest.
+
+parameters:
+ - name: show_all
+ shortdesc: Show all uptimes
+ type: boolean
+ value: false
+
+actions:
+ - shortdesc: Fetch uptimes
+ collect: fetch.py
+
+ - shortdesc: Report uptime
+ report: report.py
diff --git a/scripts/check-uptime/report.py b/scripts/check-uptime/report.py
new file mode 100755
index 0000000..81710c8
--- /dev/null
+++ b/scripts/check-uptime/report.py
@@ -0,0 +1,11 @@
+#!/usr/bin/python3
+import crm_script
+show_all = crm_script.is_true(crm_script.param('show_all'))
+uptimes = list(crm_script.output(1).items())
+max_uptime = '', 0.0
+for host, uptime in uptimes:
+    if float(uptime) > max_uptime[1]:
+        max_uptime = host, float(uptime)
+if show_all:
+    print("Uptimes: %s" % (', '.join("%s: %s" % v for v in uptimes)))
+print("Longest uptime is %s seconds on host %s" % (max_uptime[1], max_uptime[0]))
diff --git a/scripts/clvm-vg/main.yml b/scripts/clvm-vg/main.yml
new file mode 100644
index 0000000..846c70b
--- /dev/null
+++ b/scripts/clvm-vg/main.yml
@@ -0,0 +1,74 @@
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: File System
+shortdesc: Cluster-aware LVM (auto activation)
+longdesc: |
+ Configures a resource to manage the activation of a volume
+ group. Before adding this resource, you need to proceed as below
+ to create new VG and LVs. Refer to manpage lvmlockd(8) for more
+ detailed steps.
+
+ - create VG on shared devices:
+ vgcreate --shared <vgname> <devices>
+
+ - create an LV:
+ lvcreate -an -L <size> -n <lvname> <vgname>
+
+ For LVs in a shared VG, there are two activation modes: "exclusive"
+ and "shared". With "exclusive" mode (the default), an LV activated
+ on one host cannot be activated on another. With "shared" mode, an
+ LV can be activated concurrently on multiple hosts, so that a
+ cluster file system like OCFS2 can use it.
+
+ If the resource is created with activation_mode="shared", it will
+ be added to the cLVM group resource. The cLVM group resource is
+ assumed to be named g-clvm. This is the name of the resource created
+ by the clvm wizard.
+
+parameters:
+ - name: id
+ shortdesc: Volume group instance ID
+ longdesc: Unique ID for the volume group instance in the cluster.
+ required: true
+ unique: true
+ type: resource
+ value: vg1
+
+ - name: vgname
+ shortdesc: Volume Group Name
+ longdesc: LVM volume group name.
+ required: true
+ type: string
+ value: vg1
+
+ - name: activation_mode
+ shortdesc: LVM activation mode
+ longdesc: |
+ How a VG/LV is activated in the cluster, either "exclusive" (default) or "shared".
+ The activation mode to choose depends on the file system you need to create
+ on the LV. For a local file system like ext4, you need "exclusive" activation.
+ For a cluster file system like OCFS2, you need "shared" activation.
+ required: false
+ type: string
+ value: exclusive
+
+ - name: clvm-group
+ shortdesc: cLVM Resource Group ID
+ longdesc: ID of the cLVM resource group.
+ type: resource
+ required: false
+ value: g-clvm
+
+actions:
+ - cib: |
+ primitive {{id}} ocf:heartbeat:LVM-activate
+ params vgname="{{vgname}}" vg_access_mode="lvmlockd" activation_mode="{{activation_mode}}"
+ op start timeout=60s
+ op stop timeout=60s
+ op monitor interval=30s timeout=90s
+
+ - crm: configure modgroup {{clvm-group}} add {{id}}
+ shortdesc: Add volume group to the cLVM group resource
+ when: activation_mode == "shared"
diff --git a/scripts/clvm/main.yml b/scripts/clvm/main.yml
new file mode 100644
index 0000000..8ecae60
--- /dev/null
+++ b/scripts/clvm/main.yml
@@ -0,0 +1,39 @@
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: File System
+shortdesc: Cluster-aware LVM (lvmlockd)
+longdesc: |
+ Configure a cloned lvmlockd resource.
+
+ NB: Only one lvmlockd resource is necessary, regardless
+ of how many volume groups are managed as resources. To
+ monitor volume groups after configuring lvmlockd, the wizard
+ for activating volume groups can be used. Refer to manpage
+ of lvmlockd for more information.
+
+parameters:
+ - name: install
+ type: boolean
+ shortdesc: Install packages for lvmlockd
+ value: false
+
+actions:
+ - install:
+ - lvm2-lockd
+ shortdesc: Install the lvm2-lockd package
+ when: install
+ - cib: |
+ primitive dlm ocf:pacemaker:controld
+ op start timeout=90s
+ op stop timeout=100s
+
+ primitive lvmlockd ocf:heartbeat:lvmlockd
+ op start timeout=90s
+ op stop timeout=100s
+
+ group g-clvm dlm lvmlockd
+
+ clone c-clvm g-clvm
+ meta interleave=true ordered=true
diff --git a/scripts/cryptctl/README.md b/scripts/cryptctl/README.md
new file mode 100644
index 0000000..13322c8
--- /dev/null
+++ b/scripts/cryptctl/README.md
@@ -0,0 +1,56 @@
+# cryptctl
+
+## Introduction
+
+The cryptctl server daemon provides LUKS-based disk encryption. This script aims to set up an HA environment for the cryptctl-server.
+
+## Prerequisites
+
+The cryptctl server needs the following resources:
+
+* /etc/sysconfig/cryptctl-server The configuration of the server. This file is created when the server is set up, and is only modified when the configuration changes, e.g. when the administrator password is changed. It is sufficient to copy this file to all nodes when the cluster is created.
+* The server certificate files in the directory /etc/cryptctl/servertls/. The content of this directory does not change as long as the certificates are valid. It is sufficient to copy these files to all nodes when the cluster is created.
+* /var/lib/cryptctl/keydb The content of this directory must be provided on shared storage such as a SAN, NAS or NFS server. The encryption keys are saved here. For this directory a Filesystem resource agent is created.
+* An IP address the cryptctl-server is listening on. An IPaddr2 resource agent is created for this purpose.
+
+## Setup
+
+### Set up the cryptctl server
+As a first step, you have to set up the cryptctl server:
+```shell
+cryptctl init-server
+```
+
+### Create a basic cluster
+If not already done, you have to set up a basic cluster with at least two nodes. It is very important that Node1 is the server where you have configured the cryptctl server.
+
+```shell
+crm cluster init -i <NetDev> -A <AdminIP> -n <ClusterName> -y
+```
+
+Join the cluster from other nodes:
+```shell
+ssh <Node2>
+crm cluster join -y <Node1>
+```
+
+### Setup the resource group for the cryptctl server
+
+You can set up all needed resource agents and copy all files to all nodes with the cryptctl crm shell script in one step. It is strongly recommended to verify the setup first:
+
+```shell
+crm script verify cryptctl \
+ cert-path=</etc/cryptctl/servertls/certificate-name> \
+ cert-key-path=</etc/cryptctl/servertls/certificate-key-name> \
+ virtual-ip:ip=<IP-Address> \
+ filesystem:device=<Path to the device>
+```
+
+If the check was successful, you can set up the cluster group by running the script:
+```shell
+crm script run cryptctl \
+ cert-path=</etc/cryptctl/servertls/certificate-name> \
+ cert-key-path=</etc/cryptctl/servertls/certificate-key-name> \
+ virtual-ip:ip=<IP-Address> \
+ filesystem:device=<Path to the device>
+```
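+
+Afterwards, the state of the new resource group can be checked with:
+
+```shell
+crm status
+```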
diff --git a/scripts/cryptctl/main.yml b/scripts/cryptctl/main.yml
new file mode 100644
index 0000000..eabf88c
--- /dev/null
+++ b/scripts/cryptctl/main.yml
@@ -0,0 +1,70 @@
+# Copyright (C) 2022 Peter Varkoly
+# License: GNU General Public License (GPL)
+version: 2.2
+category: System Management
+shortdesc: A utility for setting up LUKS-based disk encryption
+longdesc: |
+ Configure a resource group containing a virtual IP address,
+ a filesystem resource containing the disk encryption keys and records,
+ and a systemd instance of the cryptctl server.
+
+ Furthermore, a resource group is created to bind all resources to the same node.
+parameters:
+ - name: id
+ shortdesc: ID of the resource group
+ value: cryptctl
+ - name: cert-path
+ shortdesc: The path to the created certificate
+ required: true
+ - name: cert-key-path
+ shortdesc: The path to the created certificate key
+ required: true
+
+include:
+ - script: virtual-ip
+ shortdesc: The IP address configured here will start before the cryptctl instance.
+ required: true
+ parameters:
+ - name: id
+ value: "{{id}}-vip"
+ - script: filesystem
+ shortdesc: Filesystem resource containing the disk encryption keys and records
+ required: true
+ parameters:
+ - name: id
+ value: "{{id}}-filesystem"
+ - name: directory
+ value: "/var/lib/cryptctl/keydb"
+ - agent: systemd:cryptctl-server
+ name: cryptctl-server
+ parameters:
+ - name: id
+ value: cryptctl-server-service
+ ops: |
+ op monitor interval=10s
+
+actions:
+ - service: "cryptctl-server:disable"
+ nodes: all
+ shortdesc: "Disable cryptctl-server service on all nodes."
+ - copy: "/etc/sysconfig/cryptctl-server"
+ to: "/etc/sysconfig/cryptctl-server"
+ nodes: all
+ shortdesc: "Copy the configuration to all nodes"
+ - copy: "{{cert-path}}"
+ to: "{{cert-path}}"
+ nodes: all
+ shortdesc: "Copy the certificat file to all nodes"
+ - copy: "{{cert-key-path}}"
+ to: "{{cert-key-path}}"
+ nodes: all
+ shortdesc: "Copy the certificat key file to all nodes"
+ - include: virtual-ip
+ - include: filesystem
+ - include: cryptctl-server
+ - cib: |
+ group group-{{id}}
+ {{virtual-ip:id}}
+ {{filesystem:id}}
+ {{cryptctl-server:id}}
+
diff --git a/scripts/database/main.yml b/scripts/database/main.yml
new file mode 100644
index 0000000..749ede7
--- /dev/null
+++ b/scripts/database/main.yml
@@ -0,0 +1,34 @@
+version: 2.2
+category: Database
+shortdesc: MySQL/MariaDB Database
+longdesc: >
+ Configure a MySQL or MariaDB SQL Database.
+ Enable the install option to install the necessary
+ packages for the database.
+include:
+ - agent: ocf:heartbeat:mysql
+ name: database
+ parameters:
+ - name: test_table
+ value: ""
+ ops: |
+ op start timeout=120s
+ op stop timeout=120s
+ op monitor interval=20s timeout=30s
+
+parameters:
+ - name: install
+ shortdesc: Enable to install required packages
+ type: boolean
+ value: false
+
+actions:
+ - install: mariadb
+ shortdesc: Install packages
+ when: install
+ - service:
+ - name: mysql
+ action: disable
+ shortdesc: Let cluster manage the database
+ when: install
+ - include: database
diff --git a/scripts/db2-hadr/main.yml b/scripts/db2-hadr/main.yml
new file mode 100644
index 0000000..9179b70
--- /dev/null
+++ b/scripts/db2-hadr/main.yml
@@ -0,0 +1,43 @@
+version: 2.2
+category: Database
+shortdesc: IBM DB2 Database with HADR
+longdesc: >-
+ Configure an IBM DB2 database resource as active/passive HADR,
+ along with a Virtual IP.
+
+include:
+ - agent: ocf:heartbeat:db2
+ parameters:
+ - name: id
+ required: true
+ shortdesc: DB2 Resource ID
+ longdesc: Unique ID for the database resource in the cluster.
+ type: string
+ value: db2-database
+ - name: instance
+ required: true
+ type: string
+ value: db2inst1
+ - name: dblist
+ value: db1
+ ops: |
+ op start interval="0" timeout="130"
+ op stop interval="0" timeout="120"
+ op promote interval="0" timeout="120"
+ op demote interval="0" timeout="120"
+ op monitor interval="30" timeout="60"
+ op monitor interval="45" role="Master" timeout="60"
+
+ - script: virtual-ip
+ shortdesc: The IP address configured here will start before the DB2 instance.
+ parameters:
+ - name: id
+ value: db2-virtual-ip
+actions:
+ - include: virtual-ip
+ - include: db2
+ - cib: |
+ ms ms-{{db2:id}} {{db2:id}}
+ meta target-role=Stopped notify=true
+ colocation {{virtual-ip:id}}-with-master inf: {{virtual-ip:id}}:Started ms-{{db2:id}}:Master
+ order {{virtual-ip:id}}-after-master Mandatory: ms-{{db2:id}}:promote {{virtual-ip:id}}:start
diff --git a/scripts/db2/main.yml b/scripts/db2/main.yml
new file mode 100644
index 0000000..95e7461
--- /dev/null
+++ b/scripts/db2/main.yml
@@ -0,0 +1,45 @@
+version: 2.2
+category: Database
+shortdesc: IBM DB2 Database
+longdesc: >-
+ Configure an IBM DB2 database resource, along with a Virtual IP and a file system mount point.
+
+ Note that the file system resource will be stopped initially, in case you need to run mkfs.
+
+include:
+ - agent: ocf:heartbeat:db2
+ parameters:
+ - name: id
+ required: true
+ shortdesc: DB2 Resource ID
+ longdesc: Unique ID for the database resource in the cluster.
+ type: string
+ value: db2-database
+ - name: instance
+ required: true
+ type: string
+ value: db2inst1
+ - script: virtual-ip
+ shortdesc: The IP address configured here will start before the DB2 instance.
+ parameters:
+ - name: id
+ value: db2-virtual-ip
+ - script: filesystem
+ shortdesc: The file system configured here will be mounted before the DB2 instance.
+ parameters:
+ - name: id
+ value: db2-fs
+ - name: fstype
+ value: xfs
+ - name: directory
+ value: "/db2/db2inst1"
+actions:
+ - include: virtual-ip
+ - include: filesystem
+ - include: db2
+ - cib: |
+ group g-{{id}}
+ {{virtual-ip:id}}
+ {{filesystem:id}}
+ {{id}}
+ meta target-role=Stopped
diff --git a/scripts/drbd/main.yml b/scripts/drbd/main.yml
new file mode 100644
index 0000000..29ba472
--- /dev/null
+++ b/scripts/drbd/main.yml
@@ -0,0 +1,41 @@
+version: 2.2
+category: File System
+shortdesc: DRBD Block Device
+longdesc: >-
+ Distributed Replicated Block Device. Configure a DRBD cluster resource.
+
+ Also creates a multistate resource managing the state of DRBD.
+
+ Does not create or modify the referenced DRBD configuration.
+
+parameters:
+ - name: id
+ shortdesc: DRBD Cluster Resource ID
+ required: true
+ value: drbd-data
+ type: resource
+ - name: drbd_resource
+ shortdesc: DRBD Resource Name
+ required: true
+ value: drbd0
+ type: string
+ - name: drbdconf
+ value: "/etc/drbd.conf"
+ - name: install
+ type: boolean
+ shortdesc: Install packages for DRBD
+ value: false
+
+actions:
+ - install: drbd drbd-kmp-default
+ shortdesc: Install packages for DRBD
+ when: install
+ - cib: |
+ primitive {{id}} ocf:linbit:drbd
+ params
+ drbd_resource="{{drbd_resource}}"
+ drbdconf="{{drbdconf}}"
+ op monitor interval="29s" role="Master"
+ op monitor interval="31s" role="Slave"
+ ms ms-{{id}} {{id}}
+ meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
diff --git a/scripts/exportfs/main.yml b/scripts/exportfs/main.yml
new file mode 100644
index 0000000..6dff8f2
--- /dev/null
+++ b/scripts/exportfs/main.yml
@@ -0,0 +1,37 @@
+version: 2.2
+shortdesc: "NFS Exported File System"
+category: NFS
+include:
+ - agent: ocf:heartbeat:exportfs
+ parameters:
+ - name: id
+ required: true
+ shortdesc: Resource ID
+ longdesc: Cluster Resource ID
+ type: resource
+ value: exportfs
+ - name: fsid
+ shortdesc: Unique FSID Within Cluster or Starting FSID for Multiple Exports
+ required: true
+ type: integer
+ value: 1
+ - name: directory
+ required: true
+ type: string
+ shortdesc: Mount Point (Directory)
+ longdesc: "The mount point for the file system, e.g.: /srv/nfs/home"
+ - name: options
+ required: true
+ shortdesc: Mount Options
+ longdesc: "Any additional options to be given to the mount command, for example rw,mountpoint"
+ type: string
+ - name: wait_for_leasetime_on_stop
+ required: false
+ shortdesc: Wait for Lease Time on Stop
+ longdesc: If set to true, wait for lease on stop.
+ type: boolean
+ value: true
+ ops: |
+ op monitor interval=30s
+actions:
+ - include: exportfs
diff --git a/scripts/filesystem/main.yml b/scripts/filesystem/main.yml
new file mode 100644
index 0000000..b37cf15
--- /dev/null
+++ b/scripts/filesystem/main.yml
@@ -0,0 +1,30 @@
+version: 2.2
+category: File System
+shortdesc: File System (mount point)
+include:
+ - agent: ocf:heartbeat:Filesystem
+ name: filesystem
+ parameters:
+ - name: id
+ required: true
+ type: resource
+ - name: device
+ required: true
+ type: string
+ - name: directory
+ required: true
+ type: string
+ - name: fstype
+ required: true
+ type: string
+ - name: options
+ required: false
+ type: string
+ ops: |
+ meta target-role=Stopped
+ op start timeout=60s
+ op stop timeout=60s
+ op monitor interval=20s timeout=40s
+
+actions:
+ - include: filesystem
diff --git a/scripts/gfs2-base/main.yml b/scripts/gfs2-base/main.yml
new file mode 100644
index 0000000..47afe0b
--- /dev/null
+++ b/scripts/gfs2-base/main.yml
@@ -0,0 +1,27 @@
+# Copyright (C) 2009 Andrew Beekhof
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: Script
+shortdesc: GFS2 File System Base (Cloned)
+longdesc: |
+ This template generates a cloned instance of the GFS2 file system.
+ The file system should be on the device, unless cLVM is used.
+
+parameters:
+ - name: clvm-group
+ shortdesc: cLVM Resource Group ID
+ longdesc: Optional ID of a cLVM resource group.
+ required: False
+
+actions:
+ - cib: |
+ primitive gfs-controld ocf:pacemaker:controld
+
+ clone c-gfs gfs-controld
+ meta interleave=true ordered=true
+
+ - crm: configure modgroup {{clvm-group}} add c-gfs
+ shortdesc: Add gfs controld to cLVM group
+ when: clvm-group
diff --git a/scripts/gfs2/main.yml b/scripts/gfs2/main.yml
new file mode 100644
index 0000000..673cd06
--- /dev/null
+++ b/scripts/gfs2/main.yml
@@ -0,0 +1,62 @@
+# Copyright (C) 2009 Andrew Beekhof
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+shortdesc: GFS2 File System (Cloned)
+longdesc: >-
+ This template generates a cloned instance of the GFS2 file system.
+ The file system should be on the device, unless cLVM is used.
+
+category: File System
+parameters:
+ - name: id
+ shortdesc: File System Resource ID
+ longdesc: "NB: The clone is going to be named c-<id> (e.g. c-bigfs)"
+ example: bigfs
+ required: true
+ type: resource
+ - name: directory
+ shortdesc: Mount Point
+ example: /mnt/bigfs
+ required: true
+ type: string
+ - name: device
+ shortdesc: Device
+ required: true
+ type: string
+ - name: options
+ shortdesc: Mount Options
+ type: string
+ required: false
+ - name: dlm
+ shortdesc: Create DLM Resource and Cloned Group
+ longdesc: If set, create the DLM resource and cloned resource group.
+ type: boolean
+ default: true
+ - name: group
+ shortdesc: Cloned Group Resource ID
+ longdesc: ID of cloned group
+ required: false
+ type: resource
+ default: g-dlm
+actions:
+ - when: dlm
+ cib: |
+ primitive dlm ocf:pacemaker:controld
+ op start timeout=90
+ op stop timeout=60
+ group {{group}} dlm
+ clone c-dlm {{group}} meta interleave=true
+ - cib: |
+ primitive {{id}} ocf:heartbeat:Filesystem
+ params directory="{{directory}}"
+ fstype="gfs2"
+ device="{{device}}"
+ {{#options}}options="{{options}}"{{/options}}
+ op start timeout=60s
+ op stop timeout=60s
+ op monitor interval=20s timeout=40s
+
+ - crm: configure modgroup {{group}} add {{id}}
+ shortdesc: Add the GFS2 File System to the Cloned Group
diff --git a/scripts/haproxy/haproxy.cfg b/scripts/haproxy/haproxy.cfg
new file mode 100644
index 0000000..50141a2
--- /dev/null
+++ b/scripts/haproxy/haproxy.cfg
@@ -0,0 +1,13 @@
+global
+ maxconn 256
+ daemon
+
+defaults
+ mode http
+ timeout connect 5000ms
+ timeout client 50000ms
+ timeout server 50000ms
+
+listen http-in
+ bind 0.0.0.0:80
+ stats enable
diff --git a/scripts/haproxy/main.yml b/scripts/haproxy/main.yml
new file mode 100644
index 0000000..3e784c6
--- /dev/null
+++ b/scripts/haproxy/main.yml
@@ -0,0 +1,37 @@
+version: 2.2
+category: Server
+shortdesc: HAProxy
+longdesc: |
+ HAProxy is a free, very fast and reliable solution offering
+ high availability, load balancing, and proxying for TCP and
+ HTTP-based applications. It is particularly suited for very
+ high traffic web sites and powers quite a number of the
+ world's most visited ones.
+
+ NOTE: Installs a basic haproxy.cfg configuration file.
+ This will overwrite any existing haproxy.cfg.
+
+include:
+ - agent: systemd:haproxy
+ name: haproxy
+ ops: |
+ op monitor interval=10s
+
+parameters:
+ - name: install
+ type: boolean
+ value: false
+ shortdesc: Install and configure HAProxy packages
+
+actions:
+ - install: haproxy
+ nodes: all
+ when: install
+ - service: "haproxy:disable"
+ nodes: all
+ when: install
+ - copy: haproxy.cfg
+ to: /etc/haproxy/haproxy.cfg
+ nodes: all
+ when: install
+ - include: haproxy
diff --git a/scripts/health/collect.py b/scripts/health/collect.py
new file mode 100755
index 0000000..180b866
--- /dev/null
+++ b/scripts/health/collect.py
@@ -0,0 +1,111 @@
+#!/usr/bin/python3
+from __future__ import unicode_literals
+from builtins import str
+import os
+import pwd
+import hashlib
+import platform
+import crm_script
+
+import crmsh.log
+crmsh.log.setup_logging()
+from crmsh.report import utils
+
+data = crm_script.get_input()
+
+PACKAGES = ['booth', 'cluster-glue', 'corosync', 'crmsh', 'csync2', 'drbd',
+            'fence-agents', 'gfs2', 'gfs2-utils', 'ha-cluster-bootstrap',
+            'haproxy', 'hawk', 'libdlm', 'libqb', 'ocfs2', 'ocfs2-tools',
+            'pacemaker', 'pacemaker-mgmt', 'resource-agents', 'sbd']
+
+
+def rpm_info():
+    return crm_script.rpmcheck(PACKAGES)
+
+
+def logrotate_info():
+    return {}
+
+
+def get_user():
+    return pwd.getpwuid(os.getuid()).pw_name
+
+
+def sys_info():
+    system, node, release, version, machine, processor = platform.uname()
+    distname = utils.get_distro_info()
+    hostname = os.uname()[1]
+
+    uptime = open('/proc/uptime').read().split()
+    # /proc/loadavg: the first three columns measure CPU and IO
+    # utilization over the last one, five, and 15 minute periods. The
+    # fourth column shows the number of currently running processes and
+    # the total number of processes. The last column displays the last
+    # process ID used.
+    loadavg = open('/proc/loadavg').read().split()
+
+    return {'system': system,
+            'node': node,
+            'release': release,
+            'version': version,
+            'machine': machine,
+            'processor': processor,
+            'distname': distname,
+            'user': get_user(),
+            'hostname': hostname,
+            'uptime': uptime[0],
+            'idletime': uptime[1],
+            'loadavg': loadavg[2]  # 15 minute average
+            }
+
+
+def disk_info():
+    rc, out, err = crm_script.call(['df'], shell=False)
+    if rc == 0:
+        disk_use = []
+        for line in out.split('\n')[1:]:
+            line = line.strip()
+            if line:
+                data = line.split()
+                if len(data) >= 6:
+                    disk_use.append((data[5], data[4]))
+        return disk_use
+    return []
+
+
+# configurations out of sync
+
+FILES = [
+    '/etc/csync2/key_hagroup',
+    '/etc/csync2/csync2.cfg',
+    '/etc/corosync/corosync.conf',
+    '/etc/sysconfig/sbd',
+    '/etc/sysconfig/SuSEfirewall2',
+    '/etc/sysconfig/SuSEfirewall2.d/services/cluster'
+    ]
+
+
+def files_info():
+    ret = {}
+    for f in FILES:
+        if os.path.isfile(f):
+            try:
+                ret[f] = hashlib.sha1(open(f).read().encode('utf-8')).hexdigest()
+            except IOError as e:
+                ret[f] = "error: %s" % (e)
+        else:
+            ret[f] = ""
+    return ret
+
+
+try:
+    data = {
+        'rpm': rpm_info(),
+        'logrotate': logrotate_info(),
+        'system': sys_info(),
+        'disk': disk_info(),
+        'files': files_info()
+    }
+    crm_script.exit_ok(data)
+except Exception as e:
+    crm_script.exit_fail(str(e))
diff --git a/scripts/health/hahealth.py b/scripts/health/hahealth.py
new file mode 100755
index 0000000..f46aec6
--- /dev/null
+++ b/scripts/health/hahealth.py
@@ -0,0 +1,40 @@
+#!/usr/bin/python3
+import os
+import crm_script as crm
+
+
+if not os.path.isfile('/usr/sbin/crm') and not os.path.isfile('/usr/bin/crm'):
+    # crm not installed
+    crm.exit_ok({'status': 'crm not installed'})
+
+
+def get_from_date():
+    rc, out, err = crm.call("date '+%F %H:%M' --date='1 day ago'", shell=True)
+    return out.strip()
+
+
+def create_report():
+    cmd = ['crm', 'report',
+           '-f', get_from_date(),
+           '-D', '-Z', 'health-report']
+    rc, out, err = crm.call(cmd, shell=False)
+    return rc == 0
+
+
+if not create_report():
+    crm.exit_ok({'status': 'Failed to create report'})
+
+
+def extract_report():
+    rc, out, err = crm.call(['tar', 'xjf', 'health-report.tar.bz2'], shell=False)
+    return rc == 0
+
+
+if not extract_report():
+    crm.exit_ok({'status': 'Failed to extract report'})
+
+analysis = ''
+if os.path.isfile('health-report/analysis.txt'):
+    analysis = open('health-report/analysis.txt').read()
+
+crm.exit_ok({'status': 'OK', 'analysis': analysis})
diff --git a/scripts/health/main.yml b/scripts/health/main.yml
new file mode 100644
index 0000000..7c59bdd
--- /dev/null
+++ b/scripts/health/main.yml
@@ -0,0 +1,16 @@
+version: 2.2
+category: Basic
+shortdesc: Verify health and configuration
+longdesc: |
+ Checks and detects issues with the cluster, by creating and
+ analysing a cluster report.
+
+ Requires SSH access between cluster nodes. This command is
+ also available from the command line as "crm cluster health".
+actions:
+ - collect: collect.py
+ shortdesc: Collect information
+ - apply_local: hahealth.py
+ shortdesc: Run cluster health check
+ - report: report.py
+ shortdesc: Report cluster state
diff --git a/scripts/health/report.py b/scripts/health/report.py
new file mode 100755
index 0000000..51e11d2
--- /dev/null
+++ b/scripts/health/report.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python3
+import os
+import crm_script
+data = crm_script.get_input()
+health_report = data[1]
+
+print("Processing collected information...")
+
+CORE_PACKAGES = ['corosync', 'pacemaker', 'resource-agents']
+
+warnings = []
+errors = []
+
+
+def warn(fmt, *args):
+    warnings.append(fmt % args)
+
+
+def error(fmt, *args):
+    errors.append(fmt % args)
+
+
+# sort {package: {version: [host]}}
+rpm_versions = {}
+
+LOW_UPTIME = 60.0
+HIGH_LOAD = 1.0
+
+for node, info in health_report.items():
+    if node != info['system']['hostname']:
+        error("Hostname mismatch: %s is not %s" %
+              (node, info['system']['hostname']))
+
+    if float(info['system']['uptime']) < LOW_UPTIME:
+        warn("%s: Uptime is low: %ss" % (node, info['system']['uptime']))
+
+    if float(info['system']['loadavg']) > HIGH_LOAD:
+        warn("%s: 15 minute load average is %s" % (node, info['system']['loadavg']))
+
+    for rpm in info['rpm']:
+        if 'error' in rpm:
+            if rpm['name'] not in rpm_versions:
+                rpm_versions[rpm['name']] = {rpm['error']: [node]}
+            else:
+                versions = rpm_versions[rpm['name']]
+                if rpm['error'] in versions:
+                    versions[rpm['error']].append(node)
+                else:
+                    versions[rpm['error']] = [node]
+        else:
+            if rpm['name'] not in rpm_versions:
+                rpm_versions[rpm['name']] = {rpm['version']: [node]}
+            else:
+                versions = rpm_versions[rpm['name']]
+                if rpm['version'] in versions:
+                    versions[rpm['version']].append(node)
+                else:
+                    versions[rpm['version']] = [node]
+    for disk, use in info['disk']:
+        use = int(use[:-1])
+        if use > 90:
+            warn("On %s, disk %s usage is %s%%", node, disk, use)
+
+    for logfile, state in info['logrotate'].items():
+        if not state:
+            warn("%s: No log rotation configured for %s" % (node, logfile))
+
+for cp in CORE_PACKAGES:
+    if cp not in rpm_versions:
+        error("Core package '%s' not installed on any node", cp)
+
+for name, versions in rpm_versions.items():
+    if len(versions) > 1:
+        desc = ', '.join('%s (%s)' % (v, ', '.join(nodes)) for v, nodes in list(versions.items()))
+        warn("Package %s: Versions differ! %s", name, desc)
+
+    all_hosts = set(sum([hosts for hosts in list(versions.values())], []))
+    for node in list(health_report.keys()):
+        if len(all_hosts) > 0 and node not in all_hosts:
+            warn("Package '%s' not installed on host '%s'" % (name, node))
+
+
+def compare_system(systems):
+    # Materialize the generator: check() iterates over it several times.
+    systems = list(systems)
+
+    def check(value, msg):
+        # Use .get() so a key missing from a node's report does not crash
+        vals = set([system.get(value) for host, system in systems])
+        if len(vals) > 1:
+            info = ', '.join('%s: %s' % (h, system.get(value)) for h, system in systems)
+            warn("%s: %s" % (msg, info))
+
+    check('machine', 'Architecture differs')
+    check('release', 'Kernel release differs')
+    check('distname', 'Distribution differs')
+    check('distver', 'Distribution version differs')
+    # check('version', 'Kernel version differs')
+
+
+def compare_files(systems):
+    # Materialize the generator: it is iterated once per file name.
+    systems = list(systems)
+    keys = set()
+    for host, files in systems:
+        keys.update(list(files.keys()))
+    for filename in keys:
+        vals = set([files.get(filename) for host, files in systems])
+        if len(vals) > 1:
+            info = ', '.join('%s: %s' % (h, files.get(filename)) for h, files in systems)
+            warn("%s: %s" % ("Files differ", info))
+
+
+compare_system((h, info['system']) for h, info in health_report.items())
+compare_files((h, info['files']) for h, info in health_report.items())
+
+if crm_script.output(2):
+    report = crm_script.output(2)
+    status = report.get('status')
+    analysis = report.get('analysis')
+    if status and not analysis:
+        warn("Cluster report: %s" % (status))
+    elif analysis:
+        print("INFO: Cluster report:")
+        print(analysis)
+    else:
+        warn("No cluster report generated")
+
+if errors:
+    for e in errors:
+        print("ERROR:", e)
+if warnings:
+    for w in warnings:
+        print("WARNING:", w)
+
+if not errors and not warnings:
+    print("No issues found.")
+
+workdir = os.path.dirname(crm_script.__file__)
+print("\nINFO: health-report in directory \"%s\"" % workdir)
diff --git a/scripts/libvirt/main.yml b/scripts/libvirt/main.yml
new file mode 100644
index 0000000..d982d9f
--- /dev/null
+++ b/scripts/libvirt/main.yml
@@ -0,0 +1,66 @@
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+shortdesc: STONITH for libvirt (kvm / Xen)
+longdesc: >
+ Uses libvirt as a STONITH device to fence a guest node.
+ Create a separate resource for each guest node in the cluster.
+
+ Note that the recommended fencing mechanism is SBD whenever
+ a shared storage device (like a SAN) is available.
+category: Stonith
+parameters:
+ - name: id
+ shortdesc: Resource ID (Name)
+ example: stonith-libvirt
+ required: true
+ type: resource
+ - name: target
+ shortdesc: Node to Manage With STONITH Device
+ type: resource
+ required: true
+ - name: hostlist
+ shortdesc: "List of controlled hosts: hostname[:domain_id].."
+ longdesc: >
+ The optional domain_id defaults to the hostname.
+ type: string
+ required: true
+ - name: hypervisor_uri
+ longdesc: >
+ URI for connection to the hypervisor.
+ driver[+transport]://[username@][hostlist][:port]/[path][?extraparameters]
+ e.g.
+ qemu+ssh://my_kvm_server.mydomain.my/system (uses ssh for root)
+ xen://my_kvm_server.mydomain.my/ (uses TLS for client)
+
+ virsh must be installed (e.g. libvirt-client package) and access control must
+ be configured for your selected URI.
+ example: qemu+ssh://my_kvm_server.example.com/system
+ required: true
+ - name: reset_method
+ required: false
+ example: power_cycle
+ type: string
+ shortdesc: Guest Reset Method
+ longdesc: >
+ A guest reset may be done by a sequence of off and on commands
+ (power_cycle) or by the reboot command. Which method works
+ depends on the hypervisor and guest configuration management.
+ - name: install
+ shortdesc: Enable to Install Required Packages
+ type: boolean
+ required: false
+ value: false
+actions:
+ - install: cluster-glue libvirt-client
+ nodes: all
+ when: install
+ - cib: |
+ primitive {{id}}-{{target}} stonith:external/libvirt
+ params
+ hostlist="{{hostlist}}"
+ hypervisor_uri="{{hypervisor_uri}}"
+ {{#reset_method}}reset_method="{{reset_method}}"{{/reset_method}}
+ op start timeout=60s
+ location l-{{id}}-{{target}} {{id}}-{{target}} -inf: {{target}}
diff --git a/scripts/lvm-drbd/main.yml b/scripts/lvm-drbd/main.yml
new file mode 100644
index 0000000..f435be7
--- /dev/null
+++ b/scripts/lvm-drbd/main.yml
@@ -0,0 +1,62 @@
+# Copyright (C) 2016 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: File System
+shortdesc: LVM Group on DRBD
+longdesc: |
+ Configure a LVM resource group on top of DRBD.
+
+ A DRBD primitive and Multi-state resource is used to replicate
+ data between the nodes.
+
+ LVM and file system resources are used to make the file systems
+ available on the Primary node.
+
+ For more details on what needs to be prepared to use
+ this wizard, see the Highly Available NFS Storage with
+ DRBD and Pacemaker section of the SUSE Linux Enterprise
+ High Availability Extension 12 SP1 documentation.
+
+parameters:
+ - name: group_id
+ type: resource
+ required: true
+ shortdesc: Group Resource ID
+ value: g-lvm
+
+include:
+ - name: drbd
+ script: drbd
+ required: true
+ parameters:
+ - name: drbd_resource
+ value: vg1
+
+ - name: lvm
+ script: lvm
+ required: true
+ parameters:
+ - name: volgrpname
+ value: vg1
+
+ - name: example_fs
+ shortdesc: Example File System Resource
+ script: filesystem
+ required: false
+ parameters:
+ - name: device
+ value: /dev/example
+ - name: directory
+ value: /srv/example
+ - name: fstype
+ value: xfs
+
+actions:
+ - include: drbd
+ - include: lvm
+ - shortdesc: Configure LVM and File System Group and Constraints
+ cib: |
+ group {{group_id}} {{lvm:id}} {{#example_fs:id}}{{example_fs:id}}{{/example_fs:id}}
+ order o-drbd_before_{{group_id}} Mandatory: ms-{{drbd:id}}:promote {{group_id}}:start
+ colocation c-{{group_id}}_on_drbd inf: {{group_id}} ms-{{drbd:id}}:Master
diff --git a/scripts/lvm/main.yml b/scripts/lvm/main.yml
new file mode 100644
index 0000000..5f87cb0
--- /dev/null
+++ b/scripts/lvm/main.yml
@@ -0,0 +1,21 @@
+version: 2.2
+category: Script
+longdesc: >-
+ Configure a resource for managing an LVM volume group.
+
+ Does not create the referenced volume group.
+
+include:
+ - agent: ocf:heartbeat:LVM
+ name: lvm
+ parameters:
+ - name: id
+ required: true
+ value: lvm
+ type: resource
+ - name: volgrpname
+ required: true
+ type: string
+ ops: |
+ op monitor interval=130s timeout=130s
+ op stop timeout=130s on-fail=fence
diff --git a/scripts/mailto/main.yml b/scripts/mailto/main.yml
new file mode 100644
index 0000000..bcf188e
--- /dev/null
+++ b/scripts/mailto/main.yml
@@ -0,0 +1,29 @@
+version: 2.2
+shortdesc: E-Mail
+longdesc: |
+ Notifies recipient by e-mail in the event of a resource takeover.
+category: Basic
+include:
+ - agent: ocf:heartbeat:MailTo
+ name: mailto
+ parameters:
+ - name: id
+ type: resource
+ required: true
+ - name: email
+ type: email
+ required: true
+ - name: subject
+ type: string
+ required: false
+ ops: |
+ op start timeout="10"
+ op stop timeout="10"
+ op monitor interval="10" timeout="10"
+actions:
+ - install:
+ - mailx
+ shortdesc: Ensure mail package is installed
+ - include: mailto
+ - cib: |
+ clone c-{{id}} {{id}}
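
A short usage sketch, with an illustrative address and resource ID:

    crm script run mailto id=mail-notify email=admin@example.com \
        subject="Cluster resource takeover"
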
diff --git a/scripts/nfsserver-lvm-drbd/main.yml b/scripts/nfsserver-lvm-drbd/main.yml
new file mode 100644
index 0000000..ee4a93e
--- /dev/null
+++ b/scripts/nfsserver-lvm-drbd/main.yml
@@ -0,0 +1,137 @@
+# Copyright (C) 2016 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: NFS
+shortdesc: NFS Server on LVM and DRBD
+longdesc: |
+ Configure a highly available two-node NFS server on top of
+ LVM and DRBD.
+
+ A DRBD primitive and Multi-state resource is used to replicate
+ data between the nodes.
+
+ An NFS kernel service resource ensures that the NFS server daemons
+ are always available.
+
+ LVM and file system resources are used to make the file systems
+ available on the Primary node.
+
+ A virtual NFS root export is needed for NFSv4 clients.
+
+ An example NFS export is configured, corresponding to a file system
+ mounted from the LVM logical volume.
+
+ Finally, a floating IP address resource allows clients to connect to
+ the service regardless of which physical node is primary.
+
+ For more details on what needs to be prepared to use
+ this wizard, see the Highly Available NFS Storage with
+ DRBD and Pacemaker section of the SUSE Linux Enterprise
+ High Availability Extension documentation.
+
+parameters:
+ - name: nfsserver_id
+ type: resource
+ value: nfsserver
+ shortdesc: ID for the NFS Server cluster resource
+ required: true
+
+include:
+ - name: drbd
+ script: drbd
+ required: true
+ parameters:
+ - name: drbd_resource
+ value: nfs
+
+ - name: lvm
+ script: lvm
+ required: true
+ parameters:
+ - name: volgrpname
+ value: nfs
+
+ - name: example_fs
+ shortdesc: Example File System Resource
+ script: filesystem
+ required: false
+ parameters:
+ - name: device
+ value: /dev/nfs/example
+ - name: directory
+ value: /srv/nfs/example
+ - name: fstype
+ value: xfs
+
+ - name: rootfs
+ script: exportfs
+ required: false
+ shortdesc: NFSv4 Virtual File System root.
+ parameters:
+ - name: id
+ value: exportfs-root
+ - name: fsid
+ value: 0
+ - name: directory
+ value: /srv/nfs
+ - name: options
+ value: "rw,crossmnt"
+
+ - script: exportfs
+ required: false
+ shortdesc: Exported NFS mount point.
+ parameters:
+ - name: id
+ value: exportfs
+ - name: directory
+ value: /srv/nfs/example
+ - name: options
+ value: "rw,mountpoint"
+ - name: wait_for_leasetime_on_stop
+ value: true
+
+ - script: virtual-ip
+ required: false
+ shortdesc: Configure a Virtual IP address used to access the NFS mounts.
+
+actions:
+ - shortdesc: Ensure NFS packages are installed
+ install: nfs-client nfs-kernel-server nfs-utils
+
+ - shortdesc: Configure cluster resource for the NFS server
+ cib: |
+ primitive {{nfsserver_id}} \
+ systemd:nfs-server \
+ op monitor interval=30s
+ clone cl-{{nfsserver_id}} {{nfsserver_id}}
+
+ - include: drbd
+ - include: lvm
+
+ - shortdesc: Configure LVM and File System Group and Constraints
+ cib: |
+ group g-nfs {{lvm:id}} {{#example_fs:id}}{{example_fs:id}}{{/example_fs:id}}
+ order o-drbd_before_nfs Mandatory: ms-{{drbd:id}}:promote g-nfs:start
+ colocation c-nfs_on_drbd inf: g-nfs ms-{{drbd:id}}:Master
+
+ - include: rootfs
+
+ - shortdesc: Clone Root FS Resource and Configure Constraints
+ cib: |
+ clone cl-{{rootfs:id}} {{rootfs:id}}
+ order o-root_before_nfs Mandatory: cl-{{rootfs:id}} g-nfs:start
+ colocation c-nfs_on_root inf: g-nfs cl-{{rootfs:id}}
+ when: rootfs
+ - include: exportfs
+ - shortdesc: Add ExportFS Resource to Group
+ crm: "configure modgroup g-nfs add {{exportfs:id}}"
+ when: exportfs
+ - include: virtual-ip
+ - shortdesc: Add Floating IP Address to Group
+ crm: "configure modgroup g-nfs add {{virtual-ip:id}}"
+ when: virtual-ip
+ - call: /usr/sbin/exportfs -v
+ error: Failed to configure NFS exportfs
+ shortdesc: Check Result of exportfs -v
+ sudo: true
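
Since this wizard touches many resources, a dry run with verify before
the actual run is a sensible workflow (the parameter value is
illustrative):

    crm script verify nfsserver-lvm-drbd nfsserver_id=nfsserver
    crm script run nfsserver-lvm-drbd nfsserver_id=nfsserver
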
diff --git a/scripts/nfsserver/main.yml b/scripts/nfsserver/main.yml
new file mode 100644
index 0000000..6544bf1
--- /dev/null
+++ b/scripts/nfsserver/main.yml
@@ -0,0 +1,74 @@
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: NFS
+shortdesc: NFS Server
+longdesc: |
+ Configure an NFS server. Requires an existing file system resource,
+ for example a file system running on LVM on DRBD.
+
+parameters:
+ - name: base-id
+ required: true
+ shortdesc: Base File System Resource ID
+ longdesc: The ID of an existing file system resource.
+ type: resource
+ value: base-fs
+
+include:
+ - name: rootfs
+ script: exportfs
+ required: false
+ shortdesc: NFSv4 Virtual File System Root
+ parameters:
+ - name: id
+ value: exportfs-root
+ - name: fsid
+ value: 0
+ - name: directory
+ value: /srv/nfs
+ - name: options
+ value: "rw,crossmnt"
+
+ - script: exportfs
+ required: true
+ shortdesc: Exported NFS Mount Point
+ parameters:
+ - name: id
+ value: exportfs
+ - name: directory
+ value: /srv/nfs/example
+ - name: options
+ value: "rw,mountpoint"
+ - name: wait_for_leasetime_on_stop
+ value: true
+
+ - script: virtual-ip
+ required: false
+ shortdesc: Virtual IP Address Used to Access the NFS Mounts
+
+actions:
+ - crm: "configure show {{base-id}}"
+ shortdesc: Ensure That the File System Resource Exists
+ - install: nfs-client nfs-kernel-server nfs-utils
+ shortdesc: Install NFS Packages
+ - service:
+ - nfsserver: enable
+ - nfsserver: start
+ - include: rootfs
+ - include: exportfs
+ - include: virtual-ip
+ - cib: |
+ group g-nfs {{exportfs:id}} {{virtual-ip:id}}
+ order base-then-nfs Mandatory: {{base-id}} g-nfs
+ colocation nfs-with-base inf: g-nfs {{base-id}}
+ {{#rootfs}}
+ clone c-{{rootfs:id}} {{rootfs:id}}
+ order rootfs-before-nfs Mandatory: c-{{rootfs:id}} g-nfs:start
+ colocation nfs-with-rootfs inf: g-nfs c-{{rootfs:id}}
+ {{/rootfs}}
+ - call: /usr/sbin/exportfs -v
+ error: Failed to configure NFS exportfs
+ shortdesc: Check Result of exportfs -v
+ sudo: true
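
One way to run this wizard, assuming a file system resource named
base-fs already exists (the first action above fails otherwise):

    crm script run nfsserver base-id=base-fs
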
diff --git a/scripts/nginx/main.yml b/scripts/nginx/main.yml
new file mode 100644
index 0000000..59c8d1f
--- /dev/null
+++ b/scripts/nginx/main.yml
@@ -0,0 +1,63 @@
+# Copyright (C) 2017 Xin Liang
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: Server
+shortdesc: Nginx Webserver
+longdesc: |
+ Configure a resource group containing a virtual IP address and
+ an instance of the Nginx web server.
+
+ You can optionally configure a file system resource which will be
+ mounted before the web server is started.
+
+ You can also optionally configure a database resource which will
+ be started before the web server but after mounting the optional
+ file system.
+include:
+ - agent: ocf:heartbeat:nginx
+ name: nginx
+ longdesc: |
+ The Nginx configuration file specified here must be available via the
+ same path on all cluster nodes. The nginx.service systemd unit should
+ be disabled on all cluster nodes, and the "server_name" option in the
+ Nginx configuration file should resolve to the virtual IP address.
+ ops: |
+ op start timeout="40"
+ op stop timeout="60"
+ op monitor interval="10" timeout="30"
+ - script: virtual-ip
+ shortdesc: The IP address configured here will start before the Nginx instance.
+ parameters:
+ - name: id
+ value: "{{id}}-vip"
+ - script: filesystem
+ shortdesc: Optional file system mounted before the web server is started.
+ required: false
+ - script: database
+ shortdesc: Optional database started before the web server is started.
+ required: false
+parameters:
+ - name: install
+ type: boolean
+ shortdesc: Install and configure nginx
+ value: false
+actions:
+ - install:
+ - nginx
+ shortdesc: Install the nginx package
+ when: install
+ - service:
+ - nginx: disable
+ shortdesc: Let cluster manage nginx
+ when: install
+ - include: filesystem
+ - include: database
+ - include: virtual-ip
+ - include: nginx
+ - cib: |
+ group g-{{id}}
+ {{#filesystem:id}}{{filesystem:id}}{{/filesystem:id}}
+ {{#database:id}}{{database:id}}{{/database:id}}
+ {{virtual-ip:id}}
+ {{id}}
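
A possible invocation, letting the wizard install the nginx package
and disable its service first (the id value is illustrative):

    crm script run nginx id=web install=true
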
diff --git a/scripts/ocfs2/main.yml b/scripts/ocfs2/main.yml
new file mode 100644
index 0000000..c3000dd
--- /dev/null
+++ b/scripts/ocfs2/main.yml
@@ -0,0 +1,76 @@
+# Copyright (C) 2009 Dejan Muhamedagic
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: File System
+shortdesc: OCFS2 File System
+longdesc: |
+ Configure an OCFS2 File System resource and add
+ it to a cloned DLM base group. OCFS2 uses the
+ cluster membership services from Pacemaker which
+ run in user space. Therefore, DLM needs to be
+ configured as a clone resource that is present on
+ each node in the cluster.
+
+ The file system resource should be added to a cloned
+ group which includes the DLM resource. This wizard
+ can optionally create both the required DLM resource
+ and the cloned group. The wizard can be reused to create
+ additional OCFS2 file system resources by setting the
+ group name to the name of an already-created cloned group.
+
+ If you are using cLVM, create the DLM resource and clone
+ group using the cLVM wizard. OCFS2 file system resources can
+ then be added to the group using this wizard.
+
+parameters:
+ - name: id
+ shortdesc: OCFS2 File System Resource ID
+ example: bigfs
+ type: resource
+ required: true
+ - name: directory
+ shortdesc: Mount Point
+ example: /mnt/bigfs
+ type: string
+ required: true
+ - name: device
+ shortdesc: Device
+ type: string
+ required: true
+ - name: options
+ shortdesc: Mount Options
+ type: string
+ - name: dlm
+ shortdesc: Create DLM Resource and Cloned Group
+ longdesc: If set, create the DLM resource and cloned resource group.
+ type: boolean
+ default: true
+ - name: group
+ shortdesc: Cloned Group Resource ID
+ longdesc: ID of cloned group
+ required: false
+ type: resource
+ default: g-dlm
+
+actions:
+ - when: dlm
+ cib: |
+ primitive dlm ocf:pacemaker:controld
+ op start timeout=90
+ op stop timeout=60
+ group {{group}} dlm
+ clone c-dlm {{group}} meta interleave=true
+ - cib: |
+ primitive {{id}} ocf:heartbeat:Filesystem
+ directory="{{directory}}"
+ fstype="ocfs2"
+ device="{{device}}"
+ {{#options}}options="{{options}}"{{/options}}
+ op start timeout=60s
+ op stop timeout=60s
+ op monitor interval=20s timeout=40s
+
+ - crm: configure modgroup {{group}} add {{id}}
+ shortdesc: Add the OCFS2 File System to the Cloned Group
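
The reuse described in the longdesc might look like this (device
paths are placeholders):

    # First file system: also creates the DLM resource and cloned group
    crm script run ocfs2 id=bigfs directory=/mnt/bigfs \
        device=/dev/disk/by-id/DEVICE1 dlm=true group=g-dlm
    # Additional file system: reuses the existing cloned group
    crm script run ocfs2 id=bigfs2 directory=/mnt/bigfs2 \
        device=/dev/disk/by-id/DEVICE2 dlm=false group=g-dlm
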
diff --git a/scripts/oracle/main.yml b/scripts/oracle/main.yml
new file mode 100644
index 0000000..4a79258
--- /dev/null
+++ b/scripts/oracle/main.yml
@@ -0,0 +1,51 @@
+version: 2.2
+category: Database
+shortdesc: Oracle Database
+longdesc: Configure an Oracle Database cluster resource.
+parameters:
+ - name: id
+ required: true
+ shortdesc: Resource ID
+ longdesc: Unique ID for the database cluster resource.
+ type: resource
+ value: oracle
+ - name: sid
+ required: true
+ shortdesc: Database SID
+ type: string
+ value: OracleDB
+ - name: listener
+ shortdesc: Listener.
+ required: true
+ type: string
+ value: LISTENER
+ - name: home
+ required: true
+ shortdesc: Database Home.
+ type: string
+ value: /srv/oracledb
+ - name: user
+ required: true
+ shortdesc: Database User.
+ type: string
+ default: oracle
+actions:
+ - cib: |
+ primitive lsn-{{id}} ocf:heartbeat:oralsnr
+ params
+ sid="{{sid}}"
+ home="{{home}}"
+ user="{{user}}"
+ listener="{{listener}}"
+ op monitor interval="30" timeout="60" depth="0"
+
+ primitive {{id}} ocf:heartbeat:oracle
+ params
+ sid="{{sid}}"
+ home="{{home}}"
+ user="{{user}}"
+ op monitor interval="120s"
+
+ colocation lsn-with-{{id}} inf: {{id}} lsn-{{id}}
+ order lsn-before-{{id}} Mandatory: lsn-{{id}} {{id}}
+ \ No newline at end of file
diff --git a/scripts/raid-lvm/main.yml b/scripts/raid-lvm/main.yml
new file mode 100644
index 0000000..405168f
--- /dev/null
+++ b/scripts/raid-lvm/main.yml
@@ -0,0 +1,25 @@
+version: 2.2
+category: File System
+shortdesc: RAID Hosting LVM
+longdesc: "Configure a RAID 1 host based mirror together with a cluster manager LVM volume group and LVM volumes."
+parameters:
+ - name: id
+ shortdesc: RAID and LVM Group ID
+ longdesc: File systems that should be mounted in the LVM can be added to this group resource.
+ type: resource
+ value: g-raid
+ required: true
+include:
+ - script: raid1
+ parameters:
+ - name: raidconf
+ value: /etc/mdadm.conf
+ type: string
+ - name: raiddev
+ value: /dev/md0
+ type: string
+ - script: lvm
+actions:
+ - include: lvm
+ - include: raid1
+ - cib: group {{id}} {{raid1:id}} {{lvm:id}} meta target-role=Stopped
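
A minimal run, relying on the raidconf, raiddev and volume group
defaults set by the included raid1 and lvm scripts:

    crm script run raid-lvm id=g-raid
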
diff --git a/scripts/raid1/main.yml b/scripts/raid1/main.yml
new file mode 100644
index 0000000..47ff607
--- /dev/null
+++ b/scripts/raid1/main.yml
@@ -0,0 +1,17 @@
+version: 2.2
+category: Script
+include:
+ - agent: ocf:heartbeat:Raid1
+ name: raid1
+ parameters:
+ - name: id
+ required: true
+ value: raid1
+ - name: raidconf
+ required: true
+ type: string
+ - name: raiddev
+ required: true
+ type: string
+ ops: |
+ op monitor interval=60s timeout=130s on-fail=fence
diff --git a/scripts/sap-as/main.yml b/scripts/sap-as/main.yml
new file mode 100644
index 0000000..ccb857e
--- /dev/null
+++ b/scripts/sap-as/main.yml
@@ -0,0 +1,70 @@
+version: 2.2
+category: SAP
+shortdesc: SAP ASCS Instance
+longdesc: |
+ Configure a SAP ASCS instance including:
+
+ 1) Virtual IP address for the SAP ASCS instance,
+
+ 2) A file system on shared storage (/usr/sap/SID/ASCS##),
+
+ 3) SAPInstance for ASCS.
+
+parameters:
+ - name: id
+ shortdesc: SAP ASCS Resource Group ID
+ longdesc: Unique ID for the SAP ASCS instance resource group in the cluster.
+ required: true
+ type: resource
+ value: grp_sap_NA0_sapna0as
+
+include:
+ - script: sapinstance
+ required: true
+ parameters:
+ - name: id
+ value: rsc_sapinst_NA0_ASCS00_sapna0as
+ - name: InstanceName
+ value: NA0_ASCS00_sapna0as
+ - name: START_PROFILE
+ value: "/usr/sap/NA0/SYS/profile/START_ASCS00_sapna0as"
+ - script: virtual-ip
+ shortdesc: The Virtual IP address configured here will be for the SAP ASCS instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0as
+ - name: ip
+ value: 172.17.2.53
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+ - script: filesystem
+ shortdesc: "File system resource for the /usr/sap/SID/ASCS## directory"
+ longdesc: >-
+ If a file system does not already exist on the block device
+ specified here, you will need to run mkfs to create it, prior
+ to starting the file system resource. You will also need
+ to create the mount point directory on all cluster nodes.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapna0as
+ - name: directory
+ value: "/usr/sap/NA0/ASCS00"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+actions:
+ - include: sapinstance
+ - include: virtual-ip
+ - include: filesystem
+ - cib:
+ group {{id}}
+ {{virtual-ip:id}}
+ {{filesystem:id}}
+ {{sapinstance:id}}
+ meta target-role=Stopped
diff --git a/scripts/sap-ci/main.yml b/scripts/sap-ci/main.yml
new file mode 100644
index 0000000..7c3468d
--- /dev/null
+++ b/scripts/sap-ci/main.yml
@@ -0,0 +1,70 @@
+version: 2.2
+category: SAP
+shortdesc: SAP Central Instance
+longdesc: |
+ Configure a SAP Central Instance including:
+
+ 1) Virtual IP address for the SAP Central instance,
+
+ 2) A file system on shared storage (/usr/sap/SID/DVEBMGS##),
+
+ 3) SAPInstance for the Central Instance.
+
+parameters:
+ - name: id
+ shortdesc: SAP Central Resource Group ID
+ longdesc: Unique ID for the SAP Central instance resource group in the cluster.
+ required: true
+ type: resource
+ value: grp_sap_NA0_sapna0ci
+
+include:
+ - script: sapinstance
+ required: true
+ parameters:
+ - name: id
+ value: rsc_sapinst_NA0_DVEBMGS01_sapna0ci
+ - name: InstanceName
+ value: NA0_DVEBMGS01_sapna0ci
+ - name: START_PROFILE
+ value: "/usr/sap/NA0/SYS/profile/START_DVEBMGS01_sapna0ci"
+ - script: virtual-ip
+ shortdesc: The Virtual IP address configured here will be for the SAP Central instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0ci
+ - name: ip
+ value: 172.17.2.55
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+ - script: filesystem
+ shortdesc: "File system resource for the /usr/sap/SID/DVEBMGS## directory."
+ longdesc: >-
+ If a file system does not already exist on the block device
+ specified here, you will need to run mkfs to create it, prior
+ to starting the file system resource. You will also need
+ to create the mount point directory on all cluster nodes.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapna0ci
+ - name: directory
+ value: "/usr/sap/NA0/DVEBMGS01"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+actions:
+ - include: sapinstance
+ - include: virtual-ip
+ - include: filesystem
+ - cib:
+ group {{id}}
+ {{virtual-ip:id}}
+ {{filesystem:id}}
+ {{sapinstance:id}}
+ meta target-role=Stopped
diff --git a/scripts/sap-db/main.yml b/scripts/sap-db/main.yml
new file mode 100644
index 0000000..b472f3f
--- /dev/null
+++ b/scripts/sap-db/main.yml
@@ -0,0 +1,63 @@
+version: 2.2
+category: SAP
+shortdesc: SAP Database Instance
+longdesc: |
+ Configure a SAP database instance including:
+
+ 1) A virtual IP address for the SAP database instance,
+
+ 2) A file system on shared storage (/sapdb),
+
+ 3) SAPinstance for the database.
+
+parameters:
+ - name: id
+ shortdesc: SAP Database Resource Group ID
+ longdesc: Unique ID for the SAP Database instance resource group in the cluster.
+ required: true
+ type: resource
+ value: grp_sapdb_NA0
+
+include:
+ - script: sapdb
+ required: true
+ - script: virtual-ip
+ shortdesc: The Virtual IP address configured here will be for the SAP Database instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0db
+ - name: ip
+ value: 172.17.2.54
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+ - script: filesystem
+ shortdesc: "File system resource for the SAP database (typically /sapdb)."
+ longdesc: >-
+ If a file system does not already exist on the block device
+ specified here, you will need to run mkfs to create it, prior
+ to starting the file system resource. You will also need
+ to create the mount point directory on all cluster nodes.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapna0db
+ - name: directory
+ value: "/sapdb"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+actions:
+ - include: sapdb
+ - include: virtual-ip
+ - include: filesystem
+ - cib:
+ group {{id}}
+ {{virtual-ip:id}}
+ {{filesystem:id}}
+ {{sapdb:id}}
+ meta target-role=Stopped
diff --git a/scripts/sap-simple-stack-plus/main.yml b/scripts/sap-simple-stack-plus/main.yml
new file mode 100644
index 0000000..3f1e996
--- /dev/null
+++ b/scripts/sap-simple-stack-plus/main.yml
@@ -0,0 +1,220 @@
+version: 2.2
+category: SAP
+shortdesc: SAP SimpleStack+ Instance
+longdesc: |
+ Configure a SAP instance including:
+
+ 1) Virtual IP addresses for each of the SAP instance services - ASCS, DB and CI,
+
+ 2) A RAID 1 host-based mirror,
+
+ 3) A cluster-managed LVM volume group and LVM volumes on the RAID 1 host-based mirror,
+
+ 4) File systems on shared storage for sapmnt, /sapdb, /usr/sap/SID/ASCS## and /usr/sap/SID/DVEBMGS##,
+
+ 5) SAPInstance resources for the ASCS instance, the database, and the central instance.
+
+ The difference between this and the SimpleStack is that the ASCS and CI have their own
+ volumes / file systems / mount points rather than just one volume / file system / mount point on /usr/sap.
+
+parameters:
+ - name: id
+ shortdesc: SAP SimpleStack+ Resource Group ID
+ longdesc: Unique ID for the SAP SimpleStack+ instance resource group in the cluster.
+ required: true
+ type: resource
+ value: grp_sap_NA0
+
+include:
+ - script: raid1
+ required: true
+ parameters:
+ - name: raidconf
+ value: "/etc/mdadm.conf"
+ - name: raiddev
+ value: "/dev/md0"
+
+ - script: lvm
+ required: true
+ shortdesc: LVM logical volumes for the SAP file systems.
+ parameters:
+ - name: volgrpname
+ value: sapvg
+
+ - script: filesystem
+ name: filesystem-sapmnt
+ required: true
+ shortdesc: File system resource for the sapmnt directory.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapmnt
+ - name: directory
+ value: "/sapmnt"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+ - script: filesystem
+ name: filesystem-usrsap
+ required: true
+ shortdesc: File system resource for the /usr/sap directory.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_usrsap
+ - name: directory
+ value: "/usr/sap"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+ - script: sapdb
+ required: true
+
+ - script: virtual-ip
+ name: virtual-ip-db
+ shortdesc: The Virtual IP address configured here will be for the SAP Database instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0db
+ - name: ip
+ value: 172.17.2.54
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+ - script: filesystem
+ name: filesystem-db
+ shortdesc: "File system resource for the SAP database (typically /sapdb)."
+ longdesc: >-
+ If a file system does not already exist on the block device
+ specified here, you will need to run mkfs to create it, prior
+ to starting the file system resource. You will also need
+ to create the mount point directory on all cluster nodes.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapna0db
+ - name: directory
+ value: "/sapdb"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+ - script: sapinstance
+ name: sapinstance-as
+ required: true
+ parameters:
+ - name: id
+ value: rsc_sapinst_NA0_ASCS00_sapna0as
+ - name: InstanceName
+ value: NA0_ASCS00_sapna0as
+ - name: START_PROFILE
+ value: "/usr/sap/NA0/SYS/profile/START_ASCS00_sapna0as"
+ - script: virtual-ip
+ name: virtual-ip-as
+ shortdesc: The Virtual IP address configured here will be for the SAP ASCS instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0as
+ - name: ip
+ value: 172.17.2.53
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+ - script: filesystem
+ name: filesystem-as
+ shortdesc: "File system resource for the /usr/sap/SID/ASCS## directory."
+ longdesc: >-
+ If a file system does not already exist on the block device
+ specified here, you will need to run mkfs to create it, prior
+ to starting the file system resource. You will also need
+ to create the mount point directory on all cluster nodes.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapna0as
+ - name: directory
+ value: "/usr/sap/NA0/ASCS00"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+ - script: sapinstance
+ name: sapinstance-ci
+ required: true
+ parameters:
+ - name: id
+ value: rsc_sapinst_NA0_DVEBMGS01_sapna0ci
+ - name: InstanceName
+ value: NA0_DVEBMGS01_sapna0ci
+ - name: START_PROFILE
+ value: "/usr/sap/NA0/SYS/profile/START_DVEBMGS01_sapna0ci"
+ - script: virtual-ip
+ name: virtual-ip-ci
+ shortdesc: The Virtual IP address configured here will be for the SAP Central instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0ci
+ - name: ip
+ value: 172.17.2.55
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+ - script: filesystem
+ name: filesystem-ci
+ shortdesc: "File system resource for the /usr/sap/SID/DVEBMGS## directory."
+ longdesc: >-
+ If a file system does not already exist on the block device
+ specified here, you will need to run mkfs to create it, prior
+ to starting the file system resource. You will also need
+ to create the mount point directory on all cluster nodes.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapna0ci
+ - name: directory
+ value: "/usr/sap/NA0/DVEBMGS01"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+actions:
+ - include: raid1
+ - include: lvm
+ - include: filesystem-sapmnt
+ - include: filesystem-db
+ - include: filesystem-ci
+ - include: filesystem-as
+ - include: virtual-ip-ci
+ - include: virtual-ip-db
+ - include: virtual-ip-as
+ - include: sapdb
+ - include: sapinstance-as
+ - include: sapinstance-ci
+ - cib:
+ group {{id}}
+ {{raid1:id}}
+ {{lvm:id}}
+ {{virtual-ip-db:id}}
+ {{filesystem-sapmnt:id}}
+ {{filesystem-db:id}}
+ {{sapdb:id}}
+ {{virtual-ip-as:id}}
+ {{filesystem-as:id}}
+ {{sapinstance-as:id}}
+ {{virtual-ip-ci:id}}
+ {{filesystem-ci:id}}
+ {{sapinstance-ci:id}}
+ meta target-role=Stopped
diff --git a/scripts/sap-simple-stack/main.yml b/scripts/sap-simple-stack/main.yml
new file mode 100644
index 0000000..654dd47
--- /dev/null
+++ b/scripts/sap-simple-stack/main.yml
@@ -0,0 +1,183 @@
+---
+version: 2.2
+category: SAP
+shortdesc: SAP SimpleStack Instance
+longdesc: |
+ Configure a SAP instance including:
+
+ 1) Virtual IP addresses for each of the SAP instance services - ASCS, DB and CI,
+
+ 2) A RAID 1 host-based mirror,
+
+ 3) A cluster-managed LVM volume group and LVM volumes on the RAID 1 host-based mirror,
+
+ 4) File systems on shared storage for sapmnt, /sapdb and /usr/sap,
+
+ 5) SAPInstance resources for the ASCS instance, the database, and the central instance.
+
+parameters:
+ - name: id
+ shortdesc: SAP Simple Stack Resource Group ID
+ longdesc: Unique ID for the SAP SimpleStack instance resource group in the cluster.
+ required: true
+ type: resource
+ value: grp_sap_NA0
+
+include:
+ - script: raid1
+ required: true
+ parameters:
+ - name: raidconf
+ value: "/etc/mdadm.conf"
+ - name: raiddev
+ value: "/dev/md0"
+
+ - script: lvm
+ required: true
+ shortdesc: LVM logical volumes for the SAP file systems.
+ parameters:
+ - name: volgrpname
+ value: sapvg
+
+ - script: filesystem
+ name: filesystem-sapmnt
+ required: true
+ shortdesc: File system resource for the sapmnt directory.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapmnt
+ - name: directory
+ value: "/sapmnt"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+ - script: filesystem
+ name: filesystem-usrsap
+ required: true
+ shortdesc: File system resource for the /usr/sap directory.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_usrsap
+ - name: directory
+ value: "/usr/sap"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+ - script: sapdb
+ required: true
+
+ - script: virtual-ip
+ name: virtual-ip-db
+ shortdesc: The Virtual IP address configured here will be for the SAP Database instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0db
+ - name: ip
+ value: 172.17.2.54
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+
+ - script: filesystem
+ name: filesystem-db
+ shortdesc: "File system resource for the SAP database (typically /sapdb)."
+ longdesc: >-
+ If a file system does not already exist on the block device
+ specified here, you will need to run mkfs to create it, prior
+ to starting the file system resource. You will also need
+ to create the mount point directory on all cluster nodes.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapna0db
+ - name: directory
+ value: "/sapdb"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+ - script: sapinstance
+ name: sapinstance-as
+ required: true
+ parameters:
+ - name: id
+ value: rsc_sapinst_NA0_ASCS00_sapna0as
+ - name: InstanceName
+ value: NA0_ASCS00_sapna0as
+ - name: START_PROFILE
+ value: "/usr/sap/NA0/SYS/profile/START_ASCS00_sapna0as"
+
+ - script: virtual-ip
+ name: virtual-ip-as
+ shortdesc: The Virtual IP address configured here will be for the SAP ASCS instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0as
+ - name: ip
+ value: 172.17.2.53
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+
+ - script: sapinstance
+ name: sapinstance-ci
+ required: true
+ parameters:
+ - name: id
+ value: rsc_sapinst_NA0_DVEBMGS01_sapna0ci
+ - name: InstanceName
+ value: NA0_DVEBMGS01_sapna0ci
+ - name: START_PROFILE
+ value: "/usr/sap/NA0/SYS/profile/START_DVEBMGS01_sapna0ci"
+
+ - script: virtual-ip
+ name: virtual-ip-ci
+ shortdesc: The Virtual IP address configured here will be for the SAP Central instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0ci
+ - name: ip
+ value: 172.17.2.55
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+
+actions:
+ - include: raid1
+ - include: lvm
+ - include: filesystem-usrsap
+ - include: filesystem-sapmnt
+ - include: filesystem-db
+ - include: virtual-ip-ci
+ - include: virtual-ip-db
+ - include: virtual-ip-as
+ - include: sapdb
+ - include: sapinstance-as
+ - include: sapinstance-ci
+ - cib:
+ group {{id}}
+ {{raid1:id}}
+ {{lvm:id}}
+ {{virtual-ip-ci:id}}
+ {{virtual-ip-db:id}}
+ {{virtual-ip-as:id}}
+ {{filesystem-usrsap:id}}
+ {{filesystem-sapmnt:id}}
+ {{filesystem-db:id}}
+ {{sapdb:id}}
+ {{sapinstance-as:id}}
+ {{sapinstance-ci:id}}
+ meta target-role=Stopped
diff --git a/scripts/sapdb/main.yml b/scripts/sapdb/main.yml
new file mode 100644
index 0000000..db67785
--- /dev/null
+++ b/scripts/sapdb/main.yml
@@ -0,0 +1,32 @@
+version: 2.2
+category: Script
+shortdesc: SAP Database Instance
+longdesc: Create a single SAP Database Instance.
+
+parameters:
+ - name: id
+ required: true
+ shortdesc: Resource ID
+ longdesc: Unique ID for this SAP instance resource in the cluster.
+ type: resource
+ value: rsc_sapdb_NA0
+ - name: SID
+ required: true
+ shortdesc: Database SID
+ longdesc: The SID for the database.
+ type: string
+ value: NA0
+ - name: DBTYPE
+ required: true
+ shortdesc: Database Type
+ longdesc: The type of database.
+ value: ADA
+ type: string
+
+actions:
+ - cib: |
+ primitive {{id}} ocf:heartbeat:SAPDatabase
+ params SID="{{SID}}" DBTYPE="{{DBTYPE}}"
+ op monitor interval="120" timeout="60" start-delay="180"
+ op start timeout="1800"
+ op stop timeout="1800"
diff --git a/scripts/sapinstance/main.yml b/scripts/sapinstance/main.yml
new file mode 100644
index 0000000..b6da1b5
--- /dev/null
+++ b/scripts/sapinstance/main.yml
@@ -0,0 +1,48 @@
+version: 2.2
+category: Script
+shortdesc: SAP Instance
+longdesc: Create a single SAP Instance.
+
+parameters:
+ - name: id
+ required: true
+ shortdesc: Resource ID
+ longdesc: Unique ID for this SAP instance resource in the cluster.
+ type: resource
+ value: sapinstance
+ - name: InstanceName
+ required: true
+ shortdesc: Instance Name
+ longdesc: The name of the SAP instance.
+ type: string
+ value: sapinstance
+ - name: START_PROFILE
+ required: true
+ shortdesc: Start Profile
+ longdesc: This defines the path and the file name of the SAP start profile of this particular instance.
+ type: string
+ - name: AUTOMATIC_RECOVER
+ required: true
+ shortdesc: Automatic Recover
+ longdesc: >-
+ The SAPInstance resource agent tries to recover a failed start
+ attempt automatically one time. This is done by killing running
+ instance processes, removing the kill.sap file and executing
+ cleanipc. Sometimes a crashed SAP instance leaves some
+ processes and/or shared memory segments behind. Setting this
+ option to true will try to remove those leftovers during a
+ start operation, reducing manual work for the administrator.
+ type: boolean
+ value: true
+
+actions:
+ - cib: |
+ primitive {{id}} ocf:heartbeat:SAPInstance
+ params
+ InstanceName="{{InstanceName}}"
+ AUTOMATIC_RECOVER="{{AUTOMATIC_RECOVER}}"
+ START_PROFILE="{{START_PROFILE}}"
+ op monitor interval="180" timeout="60" start-delay="240"
+ op start timeout="240"
+ op stop timeout="240" on-fail="block"
diff --git a/scripts/sbd-device/main.yml b/scripts/sbd-device/main.yml
new file mode 100644
index 0000000..27fe8d0
--- /dev/null
+++ b/scripts/sbd-device/main.yml
@@ -0,0 +1,63 @@
+# Copyright (C) 2016 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: Script
+shortdesc: "Create SBD Device"
+longdesc: |
+ Optional step to initialize and configure the SBD Device.
+
+ Prerequisites:
+
+ * The environment must have shared storage reachable by all nodes.
+
+parameters:
+ - name: device
+ shortdesc: Shared Storage Device
+ example: /dev/disk/by-id/...
+ required: true
+ type: string
+
+ - name: watchdog
+ shortdesc: Watchdog Device
+ value: /dev/watchdog
+ type: string
+
+actions:
+ - shortdesc: Verify configuration
+ sudo: true
+ call: |
+ #!/bin/sh
+ set -e
+ systemctl is-active --quiet sbd && { echo "ERROR: SBD daemon is already running"; exit 1; } || true
+ test -b "{{device}}" || { echo "ERROR: Not a device: {{device}"; exit 1; }
+ lsmod | egrep "(wd|dog)" || { echo "ERROR: No watchdog kernel module loaded"; exit 1; }
+ test -c "{{watchdog}}" || { echo "ERROR: Not a device: {{watchdog}}"; exit 1; }
+
+ - shortdesc: Initialize the SBD device
+ sudo: true
+ nodes: local
+ call: |
+ #!/bin/sh
+ sbd -d "{{device}}" dump >/dev/null 2>&1 || sbd -d "{{device}}" create
+ # sbd allocate "$(uname -n)" # FIXME
+
+ - shortdesc: Verify SBD Device
+ call: |
+ #!/bin/sh
+ sbd -d "{{device}}" list
+
+ - shortdesc: Configure SBD Daemon
+ sudo: true
+ call: |
+ #!/bin/sh
+ [ -f "/etc/sysconfig/sbd" ] && rm -f /etc/sysconfig/sbd || true
+ <<EOF
+ SBD_DEVICE="{{device}}"
+ SBD_WATCHDOG="yes"
+ SBD_WATCHDOG_DEV="{{watchdog}}"
+ EOF > /etc/sysconfig/sbd
+
+ - shortdesc: Enable SBD Daemon
+ service:
+ - sbd: start
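
After the wizard completes, the device header and node slots can be
inspected by hand (the device path is a placeholder):

    sbd -d /dev/disk/by-id/DEVICE dump
    sbd -d /dev/disk/by-id/DEVICE list
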
diff --git a/scripts/sbd/main.yml b/scripts/sbd/main.yml
new file mode 100644
index 0000000..f86ef22
--- /dev/null
+++ b/scripts/sbd/main.yml
@@ -0,0 +1,37 @@
+# Copyright (C) 2009 Dejan Muhamedagic
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: Stonith
+shortdesc: "SBD, Shared storage based fencing"
+longdesc: |
+ Create a SBD STONITH resource. SBD must be configured to use
+ a particular shared storage device using /etc/sysconfig/sbd.
+
+ This wizard can optionally create and configure a SBD device.
+ A shared device must be available and visible on all nodes.
+
+ For more information, see http://www.linux-ha.org/wiki/SBD_Fencing
+ or the sbd(8) manual page.
+
+parameters:
+ - name: id
+ shortdesc: Resource ID (Name)
+ value: sbd-fencing
+ example: sbd-fencing
+ required: true
+ type: resource
+
+include:
+ - script: sbd-device
+ required: false
+
+actions:
+ - include: sbd-device
+
+ - cib: |
+ primitive {{id}} stonith:external/sbd
+ pcmk_delay_max=30s
+
+ property stonith-enabled=true
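
A usage sketch; show lists the wizard steps, including the optional
sbd-device step with its device and watchdog parameters:

    crm script show sbd
    crm script run sbd id=sbd-fencing
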
diff --git a/scripts/virtual-ip/main.yml b/scripts/virtual-ip/main.yml
new file mode 100644
index 0000000..1ccb19e
--- /dev/null
+++ b/scripts/virtual-ip/main.yml
@@ -0,0 +1,24 @@
+version: 2.2
+shortdesc: Virtual IP
+category: Basic
+include:
+ - agent: ocf:heartbeat:IPaddr2
+ name: virtual-ip
+ parameters:
+ - name: id
+ type: resource
+ required: true
+ - name: ip
+ type: ip_address
+ required: true
+ - name: cidr_netmask
+ type: integer
+ required: false
+ - name: broadcast
+ type: string
+ required: false
+ ops: |
+ op start timeout="20" op stop timeout="20"
+ op monitor interval="10" timeout="20"
+actions:
+ - include: virtual-ip
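
For example, using the address from the virtual-ip template further
below:

    crm script run virtual-ip id=my-virtual-ip ip=192.168.1.101 \
        cidr_netmask=24
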
diff --git a/scripts/vmware/main.yml b/scripts/vmware/main.yml
new file mode 100644
index 0000000..0fd68d4
--- /dev/null
+++ b/scripts/vmware/main.yml
@@ -0,0 +1,60 @@
+# Copyright (C) 2016 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: Stonith
+shortdesc: Fencing using vCenter / ESX Server
+longdesc: |
+ Note that SBD is the recommended fencing mechanism for VMware
+ hosts! Please refer to the documentation for more details on
+ recommended fencing configurations.
+
+ Fencing for VMware virtualized hosts using ESX Server or vCenter.
+
+ This wizard configures a fencing resource for a single node.
+ It is necessary to run the wizard for each node to fence.
+
+ Prerequisites
+
+ 1. Install the vSphere Web Services SDK on all nodes.
+
+ 2. Generate vCenter credentials using credstore_admin.pl
+
+ 3. Copy credentials to the same location on all nodes.
+
+parameters:
+ - name: id
+ type: resource
+ shortdesc: Base Resource ID
+ value: vcenter-fencing
+ required: true
+ - name: node_name
+ type: string
+ shortdesc: Name of node to fence
+ required: true
+ - name: machine_name
+ type: string
+ shortdesc: Name of machine in vCenter inventory
+ required: true
+ - name: server
+ type: string
+ shortdesc: vCenter server URL
+ required: true
+ example: vcenter.example.com
+ - name: credstore
+ type: string
+ shortdesc: Credentials file name
+ required: true
+
+actions:
+ - cib: |
+ primitive {{id}}-{{node_name}} stonith:external/vcenter
+ VI_SERVER="{{server}}"
+ VI_CREDSTORE="{{credstore}}"
+ HOSTLIST="{{node_name}}={{machine_name}}"
+ RESETPOWERON="0"
+ pcmk_host_check="static-list"
+ pcmk_host_list="{{node_name}}"
+ op monitor interval="60s"
+ location loc-{{id}}-{{node_name}} {{id}}-{{node_name}} -inf: {{node_name}}
+ property stonith-enabled=true
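
Run once per node to be fenced; the credstore path is a placeholder
for the file generated with credstore_admin.pl:

    crm script run vmware id=vcenter-fencing node_name=node1 \
        machine_name=vm-node1 server=vcenter.example.com \
        credstore=/root/.vmware/credstore/vicredentials.xml
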
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..d13d998
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+# Note that this script only installs the python modules,
+# the other parts of crmsh are installed by autotools
+from setuptools import setup
+import contextlib
+import re
+
+VERSION = '0.0.1'
+
+with contextlib.suppress(Exception):
+ with open('version', 'r', encoding='ascii') as f:
+ match = re.match('^\\d+\\.\\d+\\.\\d+', f.read().strip())
+ if match:
+ VERSION = match.group(0)
+
+setup(name='crmsh',
+ version=VERSION,
+ description='Command-line interface for High-Availability cluster management',
+ author='Kristoffer Gronlund, Xin Liang',
+ author_email='XLiang@suse.com',
+ url='http://crmsh.github.io/',
+ packages=['crmsh', 'crmsh.crash_test', 'crmsh.report', 'crmsh.prun'],
+ install_requires=['lxml', 'PyYAML', 'python-dateutil'],
+ scripts=['bin/crm'],
+ data_files=[('/usr/share/crmsh', ['doc/crm.8.adoc'])],
+ include_package_data=True)
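
As the header comment notes, this installs only the Python modules;
a development install from the source tree would be:

    pip install .
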
diff --git a/templates/apache b/templates/apache
new file mode 100644
index 0000000..955257b
--- /dev/null
+++ b/templates/apache
@@ -0,0 +1,61 @@
+%name apache
+
+# Copyright (C) 2009 Dejan Muhamedagic
+#
+# License: GNU General Public License (GPL)
+
+# Apache web server
+#
+# This template generates a single primitive resource of type apache
+
+%depends_on virtual-ip
+%suggests filesystem
+
+# NB:
+# The apache RA monitor operation requires the status module to
+# be loaded and access to its page (/server-status) allowed from
+# localhost (127.0.0.1). Typically, the status module is not
+# loaded by default. How to enable it depends on your
+# distribution. For instance, on recent openSUSE or SLES
+# releases, it is enough to add word "status" to the list in
+# variable APACHE_MODULES in file /etc/sysconfig/apache2 and then
+# start and stop apache once using rcapache2.
+
+%required
+
+# Name the apache resource
+# For example, to name the resource web-1, edit the line below
+# as follows:
+# %% id web-1
+%% id
+
+# The full pathname of the Apache configuration file
+# Example:
+# %% configfile /etc/apache2/httpd.conf
+%% configfile
+
+%optional
+
+# Extra options to apply when starting apache. See man httpd(8).
+
+%% options
+
+# Files (one or more) which contain extra environment variables,
+# such as /etc/apache2/envvars
+
+%% envfiles
+
+%generate
+
+primitive %apache ocf:heartbeat:apache
+ params configfile=%_:configfile
+ opt options=%_:options
+ opt envfiles=%_:envfiles
+
+monitor %apache 120s:60s
+
+group %_:id
+ %if %filesystem
+ %filesystem
+ %fi
+ %apache %virtual-ip
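
A possible interactive session applying this template together with
the virtual-ip template it depends on (command names as provided by
the crm configure template interface; web is an illustrative name):

    crm configure template
    new web apache virtual-ip
    edit      # fill in the %% fields
    show      # preview the generated configuration
    apply
    cd ..
    commit
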
diff --git a/templates/clvm b/templates/clvm
new file mode 100644
index 0000000..96c4fff
--- /dev/null
+++ b/templates/clvm
@@ -0,0 +1,59 @@
+%name clvm
+
+# Copyright (C) 2009 Dejan Muhamedagic
+#
+# License: GNU General Public License (GPL)
+
+# Cluster-aware lvm (cloned)
+#
+# This template generates a cloned instance of clvm and one
+# volume group
+#
+# NB: You need just one clvm, regardless of how many volume
+# groups. In other words, you can use this template only for one
+# volume group and to make another one, you'll have to edit the
+# resulting configuration yourself.
+
+%required
+
+# Name the volume group (for example: vg-1)
+# The LVM resource will be in a cloned group with the rest
+# of the prerequisite resources. The clone is going to be named c-<id>
+# (e.g. c-vg-1)
+
+# For example, to name the resource vg-1, edit the line below
+# as follows:
+# %% id vg-1
+%% id
+
+# The volume group name
+# Example:
+# %% volgrpname myvolgroup
+%% volgrpname
+
+%generate
+
+primitive %_:id ocf:heartbeat:LVM
+ params volgrpname="%_:volgrpname"
+ op start timeout=60s
+ op stop timeout=60s
+ op monitor interval=30s timeout=60s
+
+primitive dlm ocf:pacemaker:controld
+ op start timeout=90s
+ op stop timeout=100s
+
+primitive clvm ocf:lvm2:clvmd
+ params daemon_timeout="30"
+ op start timeout=90s
+ op stop timeout=100s
+
+primitive cmirror ocf:lvm2:cmirrord
+ params daemon_timeout="30"
+ op start timeout=90s
+ op stop timeout=100s
+
+group g-%_:id dlm clvm cmirror %_:id
+
+clone c-%_:id g-%_:id
+ meta interleave="true" ordered="true"
diff --git a/templates/filesystem b/templates/filesystem
new file mode 100644
index 0000000..2699699
--- /dev/null
+++ b/templates/filesystem
@@ -0,0 +1,44 @@
+%name filesystem
+
+# Copyright (C) 2009 Dejan Muhamedagic
+#
+# License: GNU General Public License (GPL)
+
+# Filesystem
+#
+# This template generates a single primitive resource of type
+# Filesystem
+
+%required
+
+# The name of the block device for the filesystem, -U or -L
+# options for mount, or an NFS mount specification.
+# Example:
+# %% device /dev/hda
+%% device
+
+# The mount point for the filesystem.
+# Example:
+# %% directory /mnt/fs
+%% directory
+
+# The type of filesystem to be mounted.
+# Example:
+# %% fstype xfs
+%% fstype
+
+%optional
+
+# Any extra options to be given as -o options to mount.
+#
+# For bind mounts, add "bind" here and set fstype to "none".
+# We will do the right thing for options such as "bind,ro".
+%% options
+
+%generate
+
+primitive %_ ocf:heartbeat:Filesystem
+ params
+ device=%_:device
+ directory=%_:directory
+ fstype=%_:fstype
diff --git a/templates/gfs2 b/templates/gfs2
new file mode 100644
index 0000000..244befd
--- /dev/null
+++ b/templates/gfs2
@@ -0,0 +1,74 @@
+%name gfs2
+
+# Copyright (C) 2009 Andrew Beekhof
+#
+# License: GNU General Public License (GPL)
+
+# gfs2 filesystem (cloned)
+#
+# This template generates a cloned instance of the gfs2 filesystem
+#
+# The filesystem should be on the device, unless clvm is used
+# To use clvm, pull it along with this template:
+# new myfs gfs2 clvm
+#
+# NB: You need just one dlm and gfs-controld, regardless of how many
+# filesystems. In other words, you can use this template only for
+# one filesystem and to make another one, you'll have to edit the
+# resulting configuration yourself.
+
+%depends_on gfs2-base
+%suggests clvm
+
+%required
+
+# Name the gfs2 filesystem
+# (for example: bigfs)
+# NB: The clone is going to be named c-<id> (e.g. c-bigfs)
+# Example:
+# %% id bigfs
+%% id
+
+# The mount point
+# Example:
+# %% directory /mnt/bigfs
+%% directory
+
+# The device
+
+%% device
+
+# optional parameters for the gfs2 filesystem
+
+%optional
+
+# mount options
+
+%% options
+
+%generate
+
+primitive %_:id ocf:heartbeat:Filesystem
+ params
+ directory="%_:directory"
+ fstype="gfs2"
+ device="%_:device"
+ opt options="%_:options"
+
+monitor %_:id 20:40
+
+clone c-%_:id %_:id
+ meta interleave="true" ordered="true"
+
+colocation colo-%_:id-gfs inf: c-%_:id gfs-clone
+
+order order-%_:id-gfs inf: gfs-clone c-%_:id
+
+# if there's clvm, generate some constraints too
+#
+
+%if %clvm
+colocation colo-%_:id-%clvm:id inf: c-%_:id c-%clvm:id
+
+order order-%_:id-%clvm:id inf: c-%clvm:id c-%_:id
+%fi
diff --git a/templates/gfs2-base b/templates/gfs2-base
new file mode 100644
index 0000000..d385ed4
--- /dev/null
+++ b/templates/gfs2-base
@@ -0,0 +1,46 @@
+%name gfs2-base
+
+# Copyright (C) 2009 Andrew Beekhof
+#
+# License: GNU General Public License (GPL)
+
+# gfs2 filesystem base (cloned)
+#
+# This template generates the cloned dlm and gfs-controld base
+# resources required by gfs2 filesystems. The gfs2 template
+# depends on this one.
+#
+# NB: You need just one dlm and gfs-controld, regardless of how
+# many filesystems, so instantiate this template only once per
+# cluster.
+
+%suggests clvm
+%required
+
+%generate
+
+primitive dlm ocf:pacemaker:controld
+
+clone dlm-clone dlm
+ meta interleave="true" ordered="true"
+
+primitive gfs-controld ocf:pacemaker:controld
+
+clone gfs-clone gfs-controld
+ meta interleave="true" ordered="true"
+
+colocation colo-gfs-dlm inf: gfs-clone dlm-clone
+
+order order-gfs-dlm inf: dlm-clone gfs-clone
+
+# if there's clvm, generate some constraints too
+#
+
+%if %clvm
+colocation colo-clvm-dlm inf: clvm-clone dlm-clone
+
+order order-clvm-dlm inf: dlm-clone clvm-clone
+%fi
diff --git a/templates/ocfs2 b/templates/ocfs2
new file mode 100644
index 0000000..ae07e8b
--- /dev/null
+++ b/templates/ocfs2
@@ -0,0 +1,61 @@
+%name ocfs2
+
+# Copyright (C) 2009 Dejan Muhamedagic
+#
+# License: GNU General Public License (GPL)
+
+# ocfs2 filesystem (cloned)
+#
+# This template generates a cloned instance of the ocfs2 filesystem
+#
+# NB: You need only one dlm, regardless of how many
+# filesystems. In other words, you can use this template only for
+# one filesystem and to make another one, you'll have to edit the
+# resulting configuration yourself.
+
+%required
+
+# Name the ocfs2 filesystem (for example: bigfs)
+# Example:
+# %% id bigfs
+%% id
+
+# The mount point
+# Example:
+# %% directory /mnt/bigfs
+%% directory
+
+# The device
+
+%% device
+
+# optional parameters for the ocfs2 filesystem
+
+%optional
+
+# mount options
+
+%% options
+
+%generate
+
+primitive %_:id ocf:heartbeat:Filesystem
+ params
+ directory="%_:directory"
+ fstype="ocfs2"
+ device="%_:device"
+ opt options="%_:options"
+ op start timeout=60s
+ op stop timeout=60s
+
+monitor %_:id 20s:40s
+
+primitive dlm ocf:pacemaker:controld
+ op start timeout=90s
+ op stop timeout=100s
+ op monitor interval=60s timeout=60s
+
+clone base-%_:id dlm meta interleave="true"
+clone clusterfs-%_:id %_:id meta interleave="true"
+order base-then-clusterfs-%_:id inf: base-%_:id clusterfs-%_:id
+colocation clusterfs-with-base-%_:id inf: clusterfs-%_:id base-%_:id
diff --git a/templates/sbd b/templates/sbd
new file mode 100644
index 0000000..9ab201a
--- /dev/null
+++ b/templates/sbd
@@ -0,0 +1,34 @@
+%name sbd
+
+# Copyright (C) 2009 Dejan Muhamedagic
+#
+# License: GNU General Public License (GPL)
+
+# Shared storage based fencing.
+#
+# This template generates a single instance of external/sbd.
+#
+# There is quite a bit more to do to make this stonith operational.
+# See http://www.linux-ha.org/wiki/SBD_Fencing for information.
+#
+
+%required
+
+# The resource id (name).
+# Example:
+# %% id stonith-sbd
+%% id
+
+# Name of the device (shared disk).
+# NB: Make sure that the device remains the same on reboots. It's
+# preferable to use udev generated names rather than the usual
+# /dev/sd?
+# %% sbd_device /dev/sda
+%% sbd_device
+
+%generate
+
+primitive %_:id stonith:external/sbd
+ params sbd_device="%_:sbd_device"
+ op monitor interval=15s timeout=60s
+ op start timeout=60s
diff --git a/templates/virtual-ip b/templates/virtual-ip
new file mode 100644
index 0000000..c6ae46e
--- /dev/null
+++ b/templates/virtual-ip
@@ -0,0 +1,39 @@
+%name virtual-ip
+
+# Copyright (C) 2009 Dejan Muhamedagic
+#
+# License: GNU General Public License (GPL)
+
+# Virtual IP address
+#
+# This template generates a single primitive resource of type IPaddr
+
+%required
+
+# Specify an IP address
+# (for example: 192.168.1.101)
+# Example:
+# %% ip 192.168.1.101
+
+%% ip
+
+%optional
+
+# If your network has a mask different from its class mask, then
+# specify it here either in CIDR format or as a dotted quad
+# (for example: 24 or 255.255.255.0)
+# Example:
+# %% netmask 24
+
+%% netmask
+
+# Need LVS support? Set this to true then.
+
+%% lvs_support
+
+%generate
+
+primitive %_ ocf:heartbeat:IPaddr
+ params ip=%_:ip
+ opt cidr_netmask=%_:netmask
+ opt lvs_support=%_:lvs_support
diff --git a/test/README.regression b/test/README.regression
new file mode 100644
index 0000000..839ce59
--- /dev/null
+++ b/test/README.regression
@@ -0,0 +1,154 @@
+CRM shell regression tests
+
+* WARNING * WARNING * WARNING * WARNING * WARNING * WARNING *
+*
+* evaltest.sh uses eval to an extent you don't really want to
+* know about. Beware. Beware twice. Any input from the testcases
+* directory is considered to be trusted. So, think twice before
+* devising your tests lest you kill your precious data. Got it?
+* Good.
+*
+* Furthermore, we deliberately do very little validation of user
+* input, and no one should try to predict what will happen on
+* random input from the testcases.
+*
+* WARNING * WARNING * WARNING * WARNING * WARNING * WARNING *
+
+Manifest
+
+ regression.sh: the top level program
+ evaltest.sh: the test engine
+
+ crm-interface: interface to crm
+ descriptions: describe what we are about to do
+ defaults: the default settings for test commands
+
+ testcases/: here are the testcases and filters
+ crmtestout/: here goes the output
+
+All volatile data lives in the testcases/ directory.
+
+NB: You should never ever need to edit regression.sh and
+evaltest.sh. If you really have to, please talk to me and I will
+try to fix it so that you do not have to.
+
+Please write new test cases. The more the merrier :)
+
+Usage
+
+The usage is:
+
+ ./regression.sh ["prepare"] ["set:"<setname>|<testcase>]
+
+Test cases are collected in test sets. The default test set is
+basicset and running regression.sh without arguments will do all
+tests from that set.
+
+To show progress, for each test a '.' is printed. Once all tests
+have been evaluated, the output is checked against the expect
+file. If successful, "PASS" is printed, otherwise "FAIL".
+
+Specifying "prepare" will make regression.sh create expect
+output files for the given set of tests or testcase.
+
+To support the crm ra set of commands, the script may start and
+stop lrmd and stonithd if they are not already running.
+
+The following files may be generated:
+
+ output/<testcase>.out: the output of the testcase
+ output/regression.out: the output of regression.sh
+ output/crm.out: the output of crm tools/lrmd/stonithd etc
+
+On success output from testcases is removed and regression.out is
+empty.
+
+Driving the test cases yourself
+
+evaltest.sh accepts input from stdin, evaluates it immediately,
+and prints results to stdout/stderr. One can perhaps get a better
+feeling of what's actually going on by running it interactively.
+
+Test cases
+
+Tests are mainly written in the crm shell language with some simple
+regression test directives (starting with '%' and
+session/show/showxml).
+
+Special operations
+
+There are special operations with which it is possible to change
+environment and do other useful things. All special ops start
+with the '%' sign and may be followed by additional parameters.
+
+%setenv
+ change the environment variable; see defaults for the
+ set of global variables and resetvars() in evaltest.sh
+
+%stop
+ skip the rest of the tests
+
+%extcheck
+ feed the output of the next test case to the specified
+ external program/filter; the program should either reside in
+ testcases/ or be in the PATH, i.e.
+
+ %extcheck cat
+
+ simulates a null op :)
+
+ see testcases/metadata for some examples
+
+%ext
+ run an external command provided in the rest of the line; for
+ example:
+
+ %ext date
+
+ would print the current time (not very useful for regression
+ testing).
+
+%repeat num
+ repeat the next test num times; there are several variables
+ which are substituted in the test lines, so that we can
+ simulate a for loop:
+
+ s/%t/$test_cnt/g
+ s/%l/$line/g
+ s/%j/$job_cnt/g
+ s/%i/$repeat_cnt/g
+
+ for example, to add 10 resources:
+
+ %repeat 10
+ configure primitive p-%i ocf:pacemaker:Dummy
+
+Filters and other auxiliary files
+
+Some output is necessarily very volatile, such as time stamps.
+It is possible to specify a filter for each testcase to get rid
+of superfluous information. A filter is a filter in the UNIX
+sense: it takes input from stdin and prints results to stdout.
+
+There is a common filter called very inventively
+testcases/common.filter which is applied to all test cases.
+
+Exclude files are lists of extended regular expressions fed to
+egrep(1). That way one can filter out lines which are not
+interesting. Again, the one applied to all is
+testcases/common.excl.
+
+A test may need an arbitrary script executed before or after the
+test itself in order to ascertain some state. The two scripts
+have extensions .pre and .post respectively. Their output is sent
+to /dev/null and the exit status ignored.
+
+Finally, the daemon log files may be filtered using log_filter.
+
+The full collection of auxiliary files follows:
+
+ <TEST>.filter
+ <TEST>.excl
+ <TEST>.log_filter
+ <TEST>.pre
+ <TEST>.post
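
Typical invocations, following the usage line above (confbasic is an
illustrative testcase name):

    ./regression.sh                        # run the default basicset
    ./regression.sh prepare set:basicset   # regenerate expect files
    ./regression.sh confbasic              # run a single testcase
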
diff --git a/test/bugs-test.txt b/test/bugs-test.txt
new file mode 100644
index 0000000..f33e78f
--- /dev/null
+++ b/test/bugs-test.txt
@@ -0,0 +1,11 @@
+node node1
+primitive st stonith:null params hostlist=node1
+op_defaults timeout=60s
+group g1 gr1 gr2
+group g2 gr3
+group g3 gr4
+primitive gr1 Dummy
+primitive gr2 Dummy
+primitive gr3 Dummy
+primitive gr4 Dummy
+location loc1 g1 rule 200: #uname eq node1
diff --git a/test/cib-tests.sh b/test/cib-tests.sh
new file mode 100755
index 0000000..4df8062
--- /dev/null
+++ b/test/cib-tests.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+# Copyright (C) 2009 Lars Marowsky-Bree <lmb@suse.de>
+# See COPYING for license information.
+
+BASE=${1:-`pwd`}/cibtests
+AUTOCREATE=1
+
+logt() {
+ local msg="$1"
+ echo $(date) "$msg" >>$LOGF
+ echo "$msg"
+}
+
+difft() {
+ crm_diff -V -u -o $1 -n $2
+}
+
+run() {
+ local cmd="$1"
+ local erc="$2"
+ local msg="$3"
+ local rc
+ local out
+
+ echo $(date) "$1" >>$LOGF
+ CIB_file=$CIB_file $1 >>$LOGF 2>&1 ; rc=$?
+ echo $(date) "Returned: $rc (expected $erc)" >>$LOGF
+ if [ $erc != "I" ]; then
+ if [ $rc -ne $erc ]; then
+ logt "$msg: FAILED ($erc != $rc)"
+ cat $LOGF
+ return 1
+ fi
+ fi
+ echo "$msg: ok"
+ return 0
+}
+
+runt() {
+ local T="$1"
+ local CIBE="$BASE/$(basename $T .input).exp.xml"
+ cp $BASE/shadow.base $CIB_file
+ run "crm" 0 "Running testcase: $T" <$T
+
+ # strip <cib> attributes from CIB_file
+ echo "<cib>" > $CIB_file.$$
+ tail -n +2 $CIB_file >> $CIB_file.$$
+ mv $CIB_file.$$ $CIB_file
+
+ local rc
+ if [ ! -e $CIBE ]; then
+ if [ "$AUTOCREATE" = "1" ]; then
+ logt "Creating new expected output for $T."
+ cp $CIB_file $CIBE
+ return 0
+ else
+ logt "$T: No expected output."
+ return 0
+ fi
+ fi
+
+ if ! crm_diff -u -o $CIBE -n $CIB_file >/dev/null 2>&1 ; then
+ logt "$T: XML: $CIBE does not match $CIB_file"
+ difft $CIBE $CIB_file
+ return 1
+ fi
+ return 0
+}
+
+LOGF=$(mktemp)
+export PATH=/usr/sbin:$PATH
+
+export CIB_file=$BASE/shadow.test
+
+failed=0
+for T in $(ls $BASE/*.input) ; do
+ runt $T
+ failed=$(($? + $failed))
+done
+
+if [ $failed -gt 0 ]; then
+ logt "$failed tests failed!"
+ echo "Log:" $LOGF "CIB:" $CIB_file
+ exit 1
+fi
+
+logt "All tests passed!"
+#rm $LOGF $CIB_file
+exit 0
+
diff --git a/test/cibtests/001.exp.xml b/test/cibtests/001.exp.xml
new file mode 100644
index 0000000..c76e9d1
--- /dev/null
+++ b/test/cibtests/001.exp.xml
@@ -0,0 +1,20 @@
+<cib>
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair name="stonith-enabled" value="false" id="cib-bootstrap-options-stonith-enabled"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes/>
+ <resources>
+ <primitive id="rsc_dummy" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" interval="30" id="rsc_dummy-monitor-30"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints/>
+ <acls/>
+ </configuration>
+ <status/>
+</cib>
diff --git a/test/cibtests/001.input b/test/cibtests/001.input
new file mode 100644
index 0000000..8449a44
--- /dev/null
+++ b/test/cibtests/001.input
@@ -0,0 +1,6 @@
+configure
+property stonith-enabled=false
+primitive rsc_dummy ocf:heartbeat:Dummy
+monitor rsc_dummy 30
+commit
+quit
diff --git a/test/cibtests/002.exp.xml b/test/cibtests/002.exp.xml
new file mode 100644
index 0000000..13c017a
--- /dev/null
+++ b/test/cibtests/002.exp.xml
@@ -0,0 +1,26 @@
+<cib>
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair name="stonith-enabled" value="false" id="cib-bootstrap-options-stonith-enabled"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes/>
+ <resources>
+ <clone id="testfs-clone">
+ <meta_attributes id="testfs-clone-meta_attributes">
+ <nvpair name="ordered" value="true" id="testfs-clone-meta_attributes-ordered"/>
+ <nvpair name="interleave" value="true" id="testfs-clone-meta_attributes-interleave"/>
+ </meta_attributes>
+ <primitive id="testfs" class="ocf" provider="heartbeat" type="Dummy">
+ <instance_attributes id="testfs-instance_attributes">
+ <nvpair name="fake" value="1" id="testfs-instance_attributes-fake"/>
+ </instance_attributes>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ <acls/>
+ </configuration>
+ <status/>
+</cib>
diff --git a/test/cibtests/002.input b/test/cibtests/002.input
new file mode 100644
index 0000000..7fd9acd
--- /dev/null
+++ b/test/cibtests/002.input
@@ -0,0 +1,8 @@
+configure
+property stonith-enabled=false
+primitive testfs ocf:heartbeat:Dummy \
+ params fake=1
+clone testfs-clone testfs \
+ meta ordered="true" interleave="true"
+commit
+quit
diff --git a/test/cibtests/003.exp.xml b/test/cibtests/003.exp.xml
new file mode 100644
index 0000000..70356af
--- /dev/null
+++ b/test/cibtests/003.exp.xml
@@ -0,0 +1,27 @@
+<cib>
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair name="stonith-enabled" value="false" id="cib-bootstrap-options-stonith-enabled"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes/>
+ <resources>
+ <clone id="testfs-clone">
+ <meta_attributes id="testfs-clone-meta_attributes">
+ <nvpair name="ordered" value="true" id="testfs-clone-meta_attributes-ordered"/>
+ <nvpair name="interleave" value="true" id="testfs-clone-meta_attributes-interleave"/>
+ <nvpair id="testfs-clone-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <primitive id="testfs" class="ocf" provider="heartbeat" type="Dummy">
+ <instance_attributes id="testfs-instance_attributes">
+ <nvpair name="fake" value="2" id="testfs-instance_attributes-fake"/>
+ </instance_attributes>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ <acls/>
+ </configuration>
+ <status/>
+</cib>
diff --git a/test/cibtests/003.input b/test/cibtests/003.input
new file mode 100644
index 0000000..171f1cd
--- /dev/null
+++ b/test/cibtests/003.input
@@ -0,0 +1,11 @@
+configure
+property stonith-enabled=false
+primitive testfs ocf:heartbeat:Dummy \
+ params fake=2
+clone testfs-clone testfs \
+ meta ordered="true" interleave="true"
+commit
+up
+resource stop testfs-clone
+quit
+
diff --git a/test/cibtests/004.exp.xml b/test/cibtests/004.exp.xml
new file mode 100644
index 0000000..2d4c618
--- /dev/null
+++ b/test/cibtests/004.exp.xml
@@ -0,0 +1,27 @@
+<cib>
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair name="stonith-enabled" value="false" id="cib-bootstrap-options-stonith-enabled"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes/>
+ <resources>
+ <clone id="testfs-clone">
+ <meta_attributes id="testfs-clone-meta_attributes">
+ <nvpair name="ordered" value="true" id="testfs-clone-meta_attributes-ordered"/>
+ <nvpair name="interleave" value="true" id="testfs-clone-meta_attributes-interleave"/>
+ <nvpair id="testfs-clone-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <primitive id="testfs" class="ocf" provider="heartbeat" type="Dummy">
+ <instance_attributes id="testfs-instance_attributes">
+ <nvpair name="fake" value="hello" id="testfs-instance_attributes-fake"/>
+ </instance_attributes>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ <acls/>
+ </configuration>
+ <status/>
+</cib>
diff --git a/test/cibtests/004.input b/test/cibtests/004.input
new file mode 100644
index 0000000..86839bc
--- /dev/null
+++ b/test/cibtests/004.input
@@ -0,0 +1,11 @@
+configure
+property stonith-enabled=false
+primitive testfs ocf:heartbeat:Dummy \
+ params fake=hello
+clone testfs-clone testfs \
+ meta ordered="true" interleave="true"
+commit
+up
+resource start testfs-clone
+quit
+
diff --git a/test/cibtests/shadow.base b/test/cibtests/shadow.base
new file mode 100644
index 0000000..a4b376d
--- /dev/null
+++ b/test/cibtests/shadow.base
@@ -0,0 +1,10 @@
+<cib crm_feature_set="3.0.9" validate-with="pacemaker-2.0" epoch="59" num_updates="0" admin_epoch="0" cib-last-written="Tue Sep 2 12:08:39 2014">
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources/>
+ <constraints/>
+ <acls/>
+ </configuration>
+ <status/>
+</cib>
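
Because cib-tests.sh defaults to AUTOCREATE=1, adding a case needs
only an input file; the expected XML is captured on the first run.
A sketch with a hypothetical case number 005:

    cat > cibtests/005.input <<'EOF'
    configure
    property stonith-enabled=false
    primitive d5 ocf:heartbeat:Dummy
    commit
    quit
    EOF
    ./cib-tests.sh    # first run writes cibtests/005.exp.xml
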
diff --git a/test/crm-interface b/test/crm-interface
new file mode 100644
index 0000000..b825dab
--- /dev/null
+++ b/test/crm-interface
@@ -0,0 +1,89 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+CIB=__crmsh_regtest
+
+filter_epoch() {
+ sed '/^<cib /s/ epoch="[0-9]*"/ epoch="1"/'
+}
+filter_date() {
+ sed '/^<cib /s/cib-last-written=".*"/cib-last-written="Sun Apr 12 21:37:48 2009"/'
+}
+filter_cib() {
+ sed -n '/^<?xml/,/^<\/cib>/p' | filter_date | filter_epoch
+}
+
+crm_setup() {
+ $CRM_NO_REG options reset
+ $CRM_NO_REG options check-frequency on-verify
+ $CRM_NO_REG options check-mode relaxed
+ $CRM_NO_REG cib delete $CIB >/dev/null 2>&1
+}
+
+crm_mksample() {
+ $CRM_NO_REG cib new $CIB empty >/dev/null 2>&1
+ $CRM_NO_REG -c $CIB<<EOF
+configure
+node node1
+primitive p0 ocf:pacemaker:Dummy
+primitive p1 ocf:pacemaker:Dummy
+primitive p2 ocf:heartbeat:Delay \
+ params startdelay=2 mondelay=2 stopdelay=2
+primitive p3 ocf:pacemaker:Dummy
+primitive st stonith:null params hostlist=node1
+clone c1 p1
+clone m1 p2 meta promotable=true
+op_defaults timeout=60s
+commit
+up
+EOF
+}
+crm_show() {
+ $CRM -c $CIB<<EOF
+configure
+_regtest on
+erase
+erase nodes
+`cat`
+show
+commit
+EOF
+}
+crm_showxml() {
+ $CRM -c $CIB<<EOF | filter_cib
+configure
+_regtest on
+erase
+erase nodes
+`cat`
+show xml
+commit
+EOF
+}
+crm_session() {
+ $CRM -c $CIB <<EOF
+`cat`
+EOF
+}
+crm_filesession() {
+ local _file=`mktemp`
+ $CRM_NO_REG -c $CIB<<EOF
+configure
+delete node1
+EOF
+ $CRM -c $CIB configure save xml $_file
+ CIB_file=$_file $CRM <<EOF
+`cat`
+EOF
+ rm -f $_file
+}
+crm_single() {
+ $CRM -c $CIB $*
+}
+crm_showobj() {
+ $CRM -c $CIB<<EOF | filter_date | filter_epoch
+configure
+_regtest on
+show xml $1
+EOF
+}
diff --git a/test/defaults b/test/defaults
new file mode 100644
index 0000000..50a7a6a
--- /dev/null
+++ b/test/defaults
@@ -0,0 +1,2 @@
+# defaults
+dflt_args=""
diff --git a/test/descriptions b/test/descriptions
new file mode 100644
index 0000000..694a528
--- /dev/null
+++ b/test/descriptions
@@ -0,0 +1,19 @@
+# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+lead=".TRY"
+describe_show() {
+ echo $lead $*
+}
+describe_showxml() {
+ : echo $lead $*
+}
+describe_session() {
+ echo $lead $*
+}
+describe_filesession() {
+ echo $lead $*
+}
+describe_single() {
+ echo $lead $*
+}
diff --git a/test/evaltest.sh b/test/evaltest.sh
new file mode 100755
index 0000000..1dd6394
--- /dev/null
+++ b/test/evaltest.sh
@@ -0,0 +1,113 @@
+#!/bin/bash
+# Copyright (C) 2007 Dejan Muhamedagic <dejan@suse.de>
+# See COPYING for license information.
+
+: ${TESTDIR:=testcases}
+: ${CRM:=crm}
+CRM_NO_REG="$CRM"
+CRM="$CRM -R"
+export PYTHONUNBUFFERED=1
+export CRMSH_REGRESSION_TEST=1
+
+if [ "$1" = prof ]; then
+ CRM="$CRM -X regtest.profile"
+fi
+
+. ./defaults
+. ./crm-interface
+. ./descriptions
+
+resetvars() {
+ unset args
+ unset extcheck
+}
+
+#
+# special operations squad
+#
+specopt_setenv() {
+ eval $rest
+}
+specopt_ext() {
+ eval $rest
+}
+specopt_extcheck() {
+ extcheck="$rest"
+ set $extcheck
+ which "$1" >/dev/null 2>&1 || # a program in the PATH
+ extcheck="$TESTDIR/$extcheck" # or our script
+}
+specopt_repeat() {
+ repeat_limit=$rest
+}
+specopt() {
+ cmd=$(echo $cmd | sed 's/%//') # strip leading '%'
+ echo ".$(echo "$cmd" | tr "[:lower:]" "[:upper:]") $rest" # show what we got
+ "specopt_$cmd" # do what they asked for
+}
+
+#
+# substitute variables in the test line
+#
+substvars() {
+ sed "
+ s/%t/$test_cnt/g
+ s/%l/$line/g
+ s/%i/$repeat_cnt/g
+ "
+}
+
+dotest_session() {
+ echo -n "." >&3
+ test_cnt=$(($test_cnt+1))
+ "describe_$cmd" "$*" # show what we are about to do
+ "crm_$cmd" | # and execute the command
+ { [ "$extcheck" ] && $extcheck || cat;}
+}
+dotest_single() {
+ echo -n "." >&3
+ test_cnt=$(($test_cnt+1))
+ describe_single "$*" # show what we are about to do
+ crm_single "$*" | # and execute the command
+ { [ "$extcheck" ] && $extcheck || cat;}
+ if [ "$showobj" ]; then
+ crm_showobj $showobj
+ fi
+}
+runtest_session() {
+ while read line; do
+ if [ "$line" = . ]; then
+ break
+ fi
+ echo "$line"
+ done | dotest_session $*
+}
+runtest_single() {
+ while [ $repeat_cnt -le $repeat_limit ]; do
+ dotest_single "$*"
+ resetvars # unset all variables
+ repeat_cnt=$(($repeat_cnt+1))
+ done
+ repeat_limit=1 repeat_cnt=1
+}
+
+#
+# run the tests
+#
+repeat_limit=1 repeat_cnt=1
+line=1
+test_cnt=1
+
+crm_setup
+crm_mksample
+while read cmd rest; do
+ case "$cmd" in
+ "") : empty ;;
+ "#"*) : a comment ;;
+ "%stop") break ;;
+ "%"*) specopt ;;
+ show|showxml|session|filesession) runtest_session $rest ;;
+ *) runtest_single $cmd $rest ;;
+ esac
+ line=$(($line+1))
+done
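
The testcase format consumed by the loop above is line-oriented; a
sketch of a hypothetical testcases file, using only directives the
script handles (%-lines are special operations, "session" reads
lines until a lone dot, anything else runs as a single crm command):

    # a single command, run twice via the %repeat directive
    %repeat 2
    resource status p0
    # a session block; lines up to the dot go to one crm session
    session check cluster status
    status
    .
    %stop
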
diff --git a/test/features/bootstrap_bugs.feature b/test/features/bootstrap_bugs.feature
new file mode 100644
index 0000000..e6a2d6e
--- /dev/null
+++ b/test/features/bootstrap_bugs.feature
@@ -0,0 +1,251 @@
+@bootstrap
+Feature: Regression test for bootstrap bugs
+
+  Tag @clean means the cluster service needs to be stopped if the service is available
+ Need nodes: hanode1 hanode2 hanode3
+
+ @clean
+ Scenario: Set placement-strategy value as "default"(bsc#1129462)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Show cluster status on "hanode1"
+ When Run "crm configure get_property placement-strategy" on "hanode1"
+ Then Got output "default"
+
+ @clean
+ Scenario: Empty value not allowed for option(bsc#1141976)
+ When Try "crm -c ' '"
+ Then Except "ERROR: Empty value not allowed for dest "cib""
+ When Try "crm cluster init --name ' '"
+ Then Except "ERROR: cluster.init: Empty value not allowed for dest "cluster_name""
+ When Try "crm cluster join -c ' '"
+ Then Except "ERROR: cluster.join: Empty value not allowed for dest "cluster_node""
+ When Try "crm cluster remove -c ' '"
+ Then Except "ERROR: cluster.remove: Empty value not allowed for dest "cluster_node""
+ When Try "crm cluster geo_init -a ' '"
+ Then Except "ERROR: cluster.geo_init: Empty value not allowed for dest "arbitrator""
+ When Try "crm cluster geo_join -c ' '"
+ Then Except "ERROR: cluster.geo_join: Empty value not allowed for dest "cluster_node""
+ When Try "crm cluster geo_init_arbitrator -c ' '"
+ Then Except "ERROR: cluster.geo_init_arbitrator: Empty value not allowed for dest "cluster_node""
+
+ @clean
+ Scenario: Setup cluster with crossed network(udpu only)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -u -i eth0 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Try "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
+ Then Cluster service is "stopped" on "hanode2"
+ And Except "Cannot see peer node "hanode1", please check the communication IP" in stderr
+ When Run "crm cluster join -c hanode1 -i eth0 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+
+ @clean
+ Scenario: Remove correspond nodelist in corosync.conf while remove(bsc#1165644)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -u -i eth1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm corosync get nodelist.node.ring0_addr" on "hanode1"
+ Then Expected "@hanode2.ip.0" in stdout
+ #And Service "hawk.service" is "started" on "hanode2"
+ When Run "crm cluster remove hanode2 -y" on "hanode1"
+ Then Online nodes are "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ # verify bsc#1175708
+ #And Service "hawk.service" is "stopped" on "hanode2"
+ When Run "crm corosync get nodelist.node.ring0_addr" on "hanode1"
+ Then Expected "@hanode2.ip.0" not in stdout
+
+ @clean
+ Scenario: Multi nodes join in parallel(bsc#1175976)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ And Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2,hanode3"
+ Then Cluster service is "started" on "hanode2"
+ And Cluster service is "started" on "hanode3"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ And Show cluster status on "hanode1"
+ And File "/etc/corosync/corosync.conf" was synced in cluster
+
+ @clean
+ Scenario: Multi nodes join in parallel timed out(bsc#1175976)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ And Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+    # Simulate the join process hanging on hanode2, or hanode2 dying,
+    # by leaving the lock directory in place
+ When Run "mkdir /run/.crmsh_lock_directory" on "hanode1"
+ When Try "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Except "ERROR: cluster.join: Timed out after 120 seconds. Cannot continue since the lock directory exists at the node (hanode1:/run/.crmsh_lock_directory)"
+ When Run "rm -rf /run/.crmsh_lock_directory" on "hanode1"
+
+ @clean
+ Scenario: Change host name in /etc/hosts as alias(bsc#1183654)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "echo '@hanode1.ip.0 HANODE1'|sudo tee -a /etc/hosts" on "hanode1"
+ When Run "echo '@hanode2.ip.0 HANODE2'|sudo tee -a /etc/hosts" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c HANODE1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ When Run "crm cluster remove HANODE2 -y" on "hanode1"
+ Then Cluster service is "stopped" on "hanode2"
+ And Online nodes are "hanode1"
+
+ @clean
+ Scenario: Stop service quickly(bsc#1203601)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm cluster stop --all" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster start --all;sudo crm cluster stop --all" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "systemctl start corosync" on "hanode1"
+ Then Service "corosync" is "started" on "hanode1"
+ When Run "crm cluster stop" on "hanode1"
+ Then Service "corosync" is "stopped" on "hanode1"
+
+ @clean
+ Scenario: Can't stop all nodes' cluster service when local node's service is down(bsc#1213889)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Wait for DC
+ And Run "crm cluster stop" on "hanode1"
+ And Run "crm cluster stop --all" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+
+ @skip_non_root
+ @clean
+ Scenario: crm cluster join default behavior change in ssh key handling (bsc#1210693)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "rm -rf /home/alice/.ssh" on "hanode1"
+ When Run "rm -rf /home/alice/.ssh" on "hanode2"
+ When Run "su - alice -c "sudo crm cluster init -y"" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "su - alice -c "sudo crm cluster join -c hanode1 -y"" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+
+ @skip_non_root
+ @clean
+ Scenario: Passwordless for root, not for sudoer(bsc#1209193)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "useradd -m -s /bin/bash xin" on "hanode1"
+ When Run "echo "xin ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin" on "hanode1"
+ When Run "rm -f /root/.config/crm/crm.conf" on "hanode1"
+ When Run "useradd -m -s /bin/bash xin" on "hanode2"
+ When Run "echo "xin ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin" on "hanode2"
+ When Run "rm -f /root/.config/crm/crm.conf" on "hanode2"
+ When Run "su xin -c "sudo crm cluster run 'touch /tmp/1209193'"" on "hanode1"
+ And Run "test -f /tmp/1209193" on "hanode1"
+ And Run "test -f /tmp/1209193" on "hanode2"
+
+ @skip_non_root
+ @clean
+ Scenario: Missing public key
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "rm -f /root/.ssh/id_rsa.pub" on "hanode1"
+ When Run "rm -f /root/.ssh/id_rsa.pub" on "hanode2"
+ When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode2"
+ When Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode1"
+ And Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode2"
+ And Run "usermod -s /usr/sbin/nologin hacluster" on "hanode1"
+ And Run "usermod -s /usr/sbin/nologin hacluster" on "hanode2"
+ And Run "crm status" on "hanode1"
+ Then Check user shell for hacluster between "hanode1 hanode2"
+ Then Check passwordless for hacluster between "hanode1 hanode2"
+
+ @skip_non_root
+ @clean
+ Scenario: Skip upgrade when preconditions are not satisfied
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ And Run "mv /root/.config/crm/crm.conf{,.bak}" on "hanode1"
+ Then Run "crm status" OK on "hanode1"
+ When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ And Run "mv /root/.config/crm/crm.conf{.bak,}" on "hanode1"
+ And Run "mv /root/.ssh{,.bak}" on "hanode1"
+ Then Run "crm status" OK on "hanode1"
+ And Run "rm -rf /root/.ssh && mv /root/.ssh{.bak,}" OK on "hanode1"
+
+  # skip non-root as behave_agent is not able to run commands interactively with a non-root sudoer
+ @skip_non_root
+ @clean
+  Scenario: Owner and permission of file authorized_keys (bsc#1217279)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ # in a newly created cluster
+ When Run "crm cluster init -y" on "hanode1"
+ And Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode1"
+ And Expected "hacluster:haclient" in stdout
+ And Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode2"
+ And Expected "hacluster:haclient" in stdout
+ # in an upgraded cluster in which ~hacluster/.ssh/authorized_keys exists
+ When Run "chown root:root ~hacluster/.ssh/authorized_keys && chmod 0600 ~hacluster/.ssh/authorized_keys" on "hanode1"
+ And Run "chown root:root ~hacluster/.ssh/authorized_keys && chmod 0600 ~hacluster/.ssh/authorized_keys" on "hanode2"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode2"
+ And Run "crm status" on "hanode1"
+ Then Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode1"
+ And Expected "hacluster:haclient" in stdout
+ Then Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode2"
+ And Expected "hacluster:haclient" in stdout
+ # in an upgraded cluster in which ~hacluster/.ssh/authorized_keys does not exist
+ When Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh/" on "hanode1"
+ And Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh/" on "hanode2"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode2"
+ And Run "crm status" on "hanode1"
+ Then Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode1"
+ And Expected "hacluster:haclient" in stdout
+ Then Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode2"
+ And Expected "hacluster:haclient" in stdout
diff --git a/test/features/bootstrap_init_join_remove.feature b/test/features/bootstrap_init_join_remove.feature
new file mode 100644
index 0000000..ed04525
--- /dev/null
+++ b/test/features/bootstrap_init_join_remove.feature
@@ -0,0 +1,205 @@
+@bootstrap
+Feature: crmsh bootstrap process - init, join and remove
+
+ Test crmsh bootstrap init/join/remove process
+ Need nodes: hanode1 hanode2 hanode3
+
+ Background: Setup a two nodes cluster
+ Given Nodes ["hanode1", "hanode2", "hanode3"] are cleaned up
+ And Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Show cluster status on "hanode1"
+
+ Scenario: Init cluster service on node "hanode1", and join on node "hanode2"
+
+ Scenario: Support --all or specific node to manage cluster and nodes
+ When Run "crm node standby --all" on "hanode1"
+ Then Node "hanode1" is standby
+ And Node "hanode2" is standby
+ When Run "crm node online --all" on "hanode1"
+ Then Node "hanode1" is online
+ And Node "hanode2" is online
+ When Wait for DC
+ When Run "crm cluster stop --all" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster start --all" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster service is "started" on "hanode2"
+ When Wait for DC
+ When Run "crm cluster stop hanode2" on "hanode1"
+ Then Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster start hanode2" on "hanode1"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm cluster disable hanode2" on "hanode1"
+ Then Cluster service is "disabled" on "hanode2"
+ When Run "crm cluster enable hanode2" on "hanode1"
+ Then Cluster service is "enabled" on "hanode2"
+ When Run "crm cluster restart --all" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster service is "started" on "hanode2"
+
+ Scenario: Remove peer node "hanode2"
+ When Run "crm configure primitive d1 Dummy" on "hanode1"
+ When Run "crm configure primitive d2 Dummy" on "hanode2"
+ Then File "/etc/csync2/csync2.cfg" exists on "hanode2"
+ Then File "/etc/csync2/key_hagroup" exists on "hanode2"
+ Then File "/etc/corosync/authkey" exists on "hanode2"
+ Then File "/etc/corosync/corosync.conf" exists on "hanode2"
+ Then File "/etc/pacemaker/authkey" exists on "hanode2"
+ Then Directory "/var/lib/csync2/" not empty on "hanode2"
+ Then Directory "/var/lib/pacemaker/cib/" not empty on "hanode2"
+ Then Directory "/var/lib/pacemaker/pengine/" not empty on "hanode2"
+ Then Directory "/var/lib/corosync/" not empty on "hanode2"
+ When Run "crm cluster remove hanode2 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ And Online nodes are "hanode1"
+ And Show cluster status on "hanode1"
+ Then File "/etc/csync2/csync2.cfg" not exist on "hanode2"
+ Then File "/etc/csync2/key_hagroup" not exist on "hanode2"
+ Then File "/etc/corosync/authkey" not exist on "hanode2"
+ Then File "/etc/corosync/corosync.conf" not exist on "hanode2"
+ Then File "/etc/pacemaker/authkey" not exist on "hanode2"
+ Then Directory "/var/lib/csync2/" is empty on "hanode2"
+ Then Directory "/var/lib/pacemaker/cib/" is empty on "hanode2"
+ Then Directory "/var/lib/pacemaker/pengine/" is empty on "hanode2"
+ Then Directory "/var/lib/corosync/" is empty on "hanode2"
+
+ Scenario: Remove local node "hanode1"
+ When Run "crm configure primitive d1 Dummy" on "hanode1"
+ When Run "crm configure primitive d2 Dummy" on "hanode1"
+ Then File "/etc/csync2/csync2.cfg" exists on "hanode1"
+ Then File "/etc/csync2/key_hagroup" exists on "hanode1"
+ Then File "/etc/corosync/authkey" exists on "hanode1"
+ Then File "/etc/corosync/corosync.conf" exists on "hanode1"
+ Then File "/etc/pacemaker/authkey" exists on "hanode1"
+ Then Directory "/var/lib/csync2/" not empty on "hanode1"
+ Then Directory "/var/lib/pacemaker/cib/" not empty on "hanode1"
+ Then Directory "/var/lib/pacemaker/pengine/" not empty on "hanode1"
+ Then Directory "/var/lib/corosync/" not empty on "hanode1"
+ When Run "crm cluster remove hanode1 -y --force" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "started" on "hanode2"
+ And Show cluster status on "hanode2"
+ Then File "/etc/csync2/csync2.cfg" not exist on "hanode1"
+ Then File "/etc/csync2/key_hagroup" not exist on "hanode1"
+ Then File "/etc/corosync/authkey" not exist on "hanode1"
+ Then File "/etc/corosync/corosync.conf" not exist on "hanode1"
+ Then File "/etc/pacemaker/authkey" not exist on "hanode1"
+ Then Directory "/var/lib/csync2/" is empty on "hanode1"
+ Then Directory "/var/lib/pacemaker/cib/" is empty on "hanode1"
+ Then Directory "/var/lib/pacemaker/pengine/" is empty on "hanode1"
+ Then Directory "/var/lib/corosync/" is empty on "hanode1"
+
+ Scenario: Remove peer node "hanode2" with `crm -F node delete`
+ When Run "crm configure primitive d1 Dummy" on "hanode1"
+ When Run "crm configure primitive d2 Dummy" on "hanode2"
+ Then File "/etc/csync2/csync2.cfg" exists on "hanode2"
+ Then File "/etc/csync2/key_hagroup" exists on "hanode2"
+ Then File "/etc/corosync/authkey" exists on "hanode2"
+ Then File "/etc/corosync/corosync.conf" exists on "hanode2"
+ Then File "/etc/pacemaker/authkey" exists on "hanode2"
+ Then Directory "/var/lib/csync2/" not empty on "hanode2"
+ Then Directory "/var/lib/pacemaker/cib/" not empty on "hanode2"
+ Then Directory "/var/lib/pacemaker/pengine/" not empty on "hanode2"
+ Then Directory "/var/lib/corosync/" not empty on "hanode2"
+ When Run "crm -F cluster remove hanode2" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ And Online nodes are "hanode1"
+ And Show cluster status on "hanode1"
+ Then File "/etc/csync2/csync2.cfg" not exist on "hanode2"
+ Then File "/etc/csync2/key_hagroup" not exist on "hanode2"
+ Then File "/etc/corosync/authkey" not exist on "hanode2"
+ Then File "/etc/corosync/corosync.conf" not exist on "hanode2"
+ Then File "/etc/pacemaker/authkey" not exist on "hanode2"
+ Then Directory "/var/lib/csync2/" is empty on "hanode2"
+ Then Directory "/var/lib/pacemaker/cib/" is empty on "hanode2"
+ Then Directory "/var/lib/pacemaker/pengine/" is empty on "hanode2"
+ Then Directory "/var/lib/corosync/" is empty on "hanode2"
+ When Run "crm cluster remove hanode1 -y --force" on "hanode1"
+ Then File "/etc/corosync/corosync.conf" not exist on "hanode1"
+
+ Scenario: Remove local node "hanode1" with `crm -F node delete`
+ When Run "crm configure primitive d1 Dummy" on "hanode1"
+ When Run "crm configure primitive d2 Dummy" on "hanode1"
+ Then File "/etc/csync2/csync2.cfg" exists on "hanode1"
+ Then File "/etc/csync2/key_hagroup" exists on "hanode1"
+ Then File "/etc/corosync/authkey" exists on "hanode1"
+ Then File "/etc/corosync/corosync.conf" exists on "hanode1"
+ Then File "/etc/pacemaker/authkey" exists on "hanode1"
+ Then Directory "/var/lib/csync2/" not empty on "hanode1"
+ Then Directory "/var/lib/pacemaker/cib/" not empty on "hanode1"
+ Then Directory "/var/lib/pacemaker/pengine/" not empty on "hanode1"
+ Then Directory "/var/lib/corosync/" not empty on "hanode1"
+ When Run "crm -F node delete hanode1" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "started" on "hanode2"
+ And Show cluster status on "hanode2"
+ Then File "/etc/csync2/csync2.cfg" not exist on "hanode1"
+ Then File "/etc/csync2/key_hagroup" not exist on "hanode1"
+ Then File "/etc/corosync/authkey" not exist on "hanode1"
+ Then File "/etc/corosync/corosync.conf" not exist on "hanode1"
+ Then File "/etc/pacemaker/authkey" not exist on "hanode1"
+ Then Directory "/var/lib/csync2/" is empty on "hanode1"
+ Then Directory "/var/lib/pacemaker/cib/" is empty on "hanode1"
+ Then Directory "/var/lib/pacemaker/pengine/" is empty on "hanode1"
+ Then Directory "/var/lib/corosync/" is empty on "hanode1"
+
+ Scenario: Check hacluster's passwordless configuration on 2 nodes
+ Then Check user shell for hacluster between "hanode1 hanode2"
+ Then Check passwordless for hacluster between "hanode1 hanode2"
+
+ Scenario: Check hacluster's passwordless configuration in old cluster, 2 nodes
+ When Run "crm cluster stop --all" on "hanode1"
+ Then Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Check passwordless for hacluster between "hanode1 hanode2"
+
+ Scenario: Check hacluster's passwordless configuration on 3 nodes
+ Given Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ And Check user shell for hacluster between "hanode1 hanode2 hanode3"
+ And Check passwordless for hacluster between "hanode1 hanode2 hanode3"
+
+ Scenario: Check hacluster's passwordless configuration in old cluster, 3 nodes
+ Given Cluster service is "stopped" on "hanode3"
+ When Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode1"
+ And Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode2"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ And Check passwordless for hacluster between "hanode1 hanode2 hanode3"
+
+ Scenario: Check hacluster's user shell
+ Given Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ When Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode1"
+ And Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode2"
+ And Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode3"
+ And Run "usermod -s /usr/sbin/nologin hacluster" on "hanode1"
+ And Run "usermod -s /usr/sbin/nologin hacluster" on "hanode2"
+ And Run "usermod -s /usr/sbin/nologin hacluster" on "hanode3"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode2"
+ And Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode3"
+ And Run "crm status" on "hanode1"
+ Then Check user shell for hacluster between "hanode1 hanode2 hanode3"
+ Then Check passwordless for hacluster between "hanode1 hanode2 hanode3"
diff --git a/test/features/bootstrap_options.feature b/test/features/bootstrap_options.feature
new file mode 100644
index 0000000..5ccc052
--- /dev/null
+++ b/test/features/bootstrap_options.feature
@@ -0,0 +1,165 @@
+@bootstrap
+Feature: crmsh bootstrap process - options
+
+ Test crmsh bootstrap options:
+ "--node": Additional nodes to add to the created cluster
+ "-i": Bind to IP address on interface IF
+ "-M": Configure corosync with second heartbeat line
+ "-n": Set the name of the configured cluster
+ "-A": Configure IP address as an administration virtual IP
+ "-u": Configure corosync to communicate over unicast
+ "-U": Configure corosync to communicate over multicast
+  Tag @clean means the cluster service needs to be stopped if the service is available
+ Need nodes: hanode1 hanode2 hanode3
+
+ @clean
+ Scenario: Check help output
+ When Run "crm -h" on "hanode1"
+ Then Output is the same with expected "crm" help output
+ When Run "crm cluster init -h" on "hanode1"
+ Then Output is the same with expected "crm cluster init" help output
+ When Run "crm cluster join -h" on "hanode1"
+ Then Output is the same with expected "crm cluster join" help output
+ When Run "crm cluster remove -h" on "hanode1"
+ Then Output is the same with expected "crm cluster remove" help output
+ When Run "crm cluster geo_init -h" on "hanode1"
+ Then Output is the same with expected "crm cluster geo-init" help output
+ When Run "crm cluster geo_join -h" on "hanode1"
+ Then Output is the same with expected "crm cluster geo-join" help output
+ When Run "crm cluster geo_init_arbitrator -h" on "hanode1"
+ Then Output is the same with expected "crm cluster geo-init-arbitrator" help output
+ When Try "crm cluster init -i eth1 -i eth1 -y"
+ Then Except multiple lines
+ """
+ usage: init [options] [STAGE]
+ crm: error: Duplicated input for '-i/--interface' option
+ """
+ When Try "crm cluster init sbd -x -y" on "hanode1"
+ Then Expected "-x option or SKIP_CSYNC2_SYNC can't be used with any stage" in stderr
+ When Try "crm cluster init -i eth0 -i eth1 -i eth2 -y" on "hanode1"
+ Then Expected "Maximum number of interface is 2" in stderr
+ When Try "crm cluster init sbd -N hanode1 -N hanode2 -y" on "hanode1"
+ Then Expected "Can't use -N/--nodes option and stage(sbd) together" in stderr
+
+ @clean
+ Scenario: Init whole cluster service on node "hanode1" using "--node" option
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y --node "hanode1 hanode2 hanode3"" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Show cluster status on "hanode1"
+
+ @clean
+ Scenario: Bind specific network interface using "-i" option
+ Given Cluster service is "stopped" on "hanode1"
+ And IP "@hanode1.ip.0" is belong to "eth1"
+ When Run "crm cluster init -i eth1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And IP "@hanode1.ip.0" is used by corosync on "hanode1"
+ And Show corosync ring status
+
+ @clean
+  Scenario: Using multiple network interfaces using "-M" option
+ Given Cluster service is "stopped" on "hanode1"
+ And IP "@hanode1.ip.default" is belong to "eth0"
+ And IP "@hanode1.ip.0" is belong to "eth1"
+ When Run "crm cluster init -M -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And IP "@hanode1.ip.default" is used by corosync on "hanode1"
+ And IP "@hanode1.ip.0" is used by corosync on "hanode1"
+ And Show corosync ring status
+ And Corosync working on "unicast" mode
+
+ @clean
+  Scenario: Using multiple network interfaces using "-i" option
+ Given Cluster service is "stopped" on "hanode1"
+ And IP "@hanode1.ip.default" is belong to "eth0"
+ And IP "@hanode1.ip.0" is belong to "eth1"
+ When Run "crm cluster init -i eth0 -i eth1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And IP "@hanode1.ip.default" is used by corosync on "hanode1"
+ And IP "@hanode1.ip.0" is used by corosync on "hanode1"
+ And Show corosync ring status
+
+ @clean
+ Scenario: Setup cluster name and virtual IP using "-A" option
+ Given Cluster service is "stopped" on "hanode1"
+ When Try "crm cluster init -A xxx -y"
+ Then Except "ERROR: cluster.init: 'xxx' does not appear to be an IPv4 or IPv6 address"
+ When Try "crm cluster init -A @hanode1.ip.0 -y"
+ Then Except "ERROR: cluster.init: Address already in use: @hanode1.ip.0"
+ When Run "crm cluster init -n hatest -A @vip.0 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster name is "hatest"
+ And Cluster virtual IP is "@vip.0"
+ And Show cluster status on "hanode1"
+
+ @clean
+ Scenario: Init cluster service with udpu using "-u" option
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "crm cluster init -u -y -i eth0" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster is using udpu transport mode
+ And IP "@hanode1.ip.default" is used by corosync on "hanode1"
+ And Show corosync ring status
+ And Corosync working on "unicast" mode
+
+ @clean
+ Scenario: Init cluster service with ipv6 using "-I" option
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -I -i eth1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And IP "@hanode1.ip6.default" is used by corosync on "hanode1"
+ When Run "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And IP "@hanode2.ip6.default" is used by corosync on "hanode2"
+ And Corosync working on "unicast" mode
+
+ @clean
+ Scenario: Init cluster service with ipv6 unicast using "-I" and "-u" option
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -I -i eth1 -u -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And IP "@hanode1.ip6.default" is used by corosync on "hanode1"
+ When Run "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And IP "@hanode2.ip6.default" is used by corosync on "hanode2"
+ And Show cluster status on "hanode1"
+ And Corosync working on "unicast" mode
+
+ @clean
+ Scenario: Init cluster service with multicast using "-U" option (bsc#1132375)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -U -i eth1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Show cluster status on "hanode1"
+ And Corosync working on "multicast" mode
+
+ @clean
+ Scenario: Init cluster with -N option (bsc#1175863)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -N hanode1 -N hanode2 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster service is "started" on "hanode2"
+
+ @clean
+ Scenario: Skip using csync2 by -x option
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y -x" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "csync2.socket" is "stopped" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "csync2.socket" is "stopped" on "hanode2"
+ When Run "crm cluster init csync2 -y" on "hanode1"
+ Then Service "csync2.socket" is "started" on "hanode1"
+ And Service "csync2.socket" is "started" on "hanode2"
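
Taken together, the options this feature exercises compose in a
single call; a sketch only, with a placeholder virtual IP:

    # name the cluster, bind corosync to eth0, use unicast, and
    # configure an administration virtual IP
    crm cluster init -n hatest -i eth0 -u -A 192.168.122.240 -y
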
diff --git a/test/features/bootstrap_sbd_delay.feature b/test/features/bootstrap_sbd_delay.feature
new file mode 100644
index 0000000..8b636d1
--- /dev/null
+++ b/test/features/bootstrap_sbd_delay.feature
@@ -0,0 +1,286 @@
+@sbd
+Feature: configure sbd delay start correctly
+
+  Tag @clean means the cluster service needs to be stopped if the service is available
+
+ @clean
+ Scenario: disk-based SBD with small sbd_watchdog_timeout
+ Given Run "test -f /etc/crm/profiles.yml" OK
+ Given Yaml "default:corosync.totem.token" value is "5000"
+ Given Yaml "default:sbd.watchdog_timeout" value is "15"
+
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ And SBD option "SBD_DELAY_START" value is "no"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And SBD option "msgwait" value for "/dev/sda1" is "30"
+ # calculated and set by sbd RA
+ And Cluster property "stonith-timeout" is "43"
+ And Parameter "pcmk_delay_max" not configured in "stonith-sbd"
+
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ # SBD_DELAY_START >= (token + consensus + pcmk_delay_max + msgwait) # for disk-based sbd
+ And SBD option "SBD_DELAY_START" value is "71"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And SBD option "msgwait" value for "/dev/sda1" is "30"
+ # value_from_sbd >= 1.2 * (pcmk_delay_max + msgwait) # for disk-based sbd
+ # stonith_timeout >= max(value_from_sbd, constants.STONITH_TIMEOUT_DEFAULT) + token + consensus
+ And Cluster property "stonith-timeout" is "83"
+ And Parameter "pcmk_delay_max" configured in "stonith-sbd"
+
+ Given Has disk "/dev/sda1" on "hanode3"
+ Given Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ And Service "sbd" is "started" on "hanode3"
+ # SBD_DELAY_START >= (token + consensus + pcmk_delay_max + msgwait) # for disk-based sbd
+ # runtime value is "41", we keep the larger one here
+ And SBD option "SBD_DELAY_START" value is "71"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And SBD option "msgwait" value for "/dev/sda1" is "30"
+ # value_from_sbd >= 1.2 * (pcmk_delay_max + msgwait) # for disk-based sbd
+ # stonith_timeout >= max(value_from_sbd, constants.STONITH_TIMEOUT_DEFAULT) + token + consensus
+    # runtime value is "71", we keep the larger one here
+ And Cluster property "stonith-timeout" is "83"
+ And Parameter "pcmk_delay_max" not configured in "stonith-sbd"
+
+ When Run "crm cluster remove hanode3 -y" on "hanode1"
+ Then Cluster service is "stopped" on "hanode3"
+ And Service "sbd" is "stopped" on "hanode3"
+ And SBD option "SBD_DELAY_START" value is "71"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And SBD option "msgwait" value for "/dev/sda1" is "30"
+ And Cluster property "stonith-timeout" is "83"
+ And Parameter "pcmk_delay_max" configured in "stonith-sbd"
+
+ @clean
+ Scenario: disk-less SBD with small sbd_watchdog_timeout
+ Given Run "test -f /etc/crm/profiles.yml" OK
+ Given Yaml "default:corosync.totem.token" value is "5000"
+ Given Yaml "default:sbd.watchdog_timeout" value is "15"
+
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "crm cluster init -S -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And SBD option "SBD_DELAY_START" value is "no"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And Cluster property "stonith-timeout" is "60"
+
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ # SBD_DELAY_START >= (token + consensus + 2*SBD_WATCHDOG_TIMEOUT) # for disk-less sbd
+ And SBD option "SBD_DELAY_START" value is "41"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ # stonith-timeout >= 1.2 * max(stonith_watchdog_timeout, 2*SBD_WATCHDOG_TIMEOUT) # for disk-less sbd
+ # stonith_timeout >= max(value_from_sbd, constants.STONITH_TIMEOUT_DEFAULT) + token + consensus
+ And Cluster property "stonith-timeout" is "71"
+
+ Given Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ And SBD option "SBD_DELAY_START" value is "41"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And Cluster property "stonith-timeout" is "71"
+
+ When Run "crm cluster remove hanode3 -y" on "hanode1"
+ Then Cluster service is "stopped" on "hanode3"
+ And SBD option "SBD_DELAY_START" value is "41"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And Cluster property "stonith-timeout" is "71"
+
+ @clean
+ Scenario: disk-based SBD with big sbd_watchdog_timeout
+ When Run "sed -i 's/watchdog_timeout: 15/watchdog_timeout: 60/' /etc/crm/profiles.yml" on "hanode1"
+ Given Yaml "default:corosync.totem.token" value is "5000"
+ Given Yaml "default:sbd.watchdog_timeout" value is "60"
+
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ And SBD option "SBD_DELAY_START" value is "no"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "60"
+ And SBD option "msgwait" value for "/dev/sda1" is "120"
+ # calculated and set by sbd RA
+ And Cluster property "stonith-timeout" is "172"
+ And Parameter "pcmk_delay_max" not configured in "stonith-sbd"
+
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ # SBD_DELAY_START >= (token + consensus + pcmk_delay_max + msgwait) # for disk-based sbd
+ And SBD option "SBD_DELAY_START" value is "161"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "60"
+ And SBD option "msgwait" value for "/dev/sda1" is "120"
+ # stonith-timeout >= 1.2 * (pcmk_delay_max + msgwait) # for disk-based sbd
+ # stonith_timeout >= max(value_from_sbd, constants.STONITH_TIMEOUT_DEFAULT) + token + consensus
+ And Cluster property "stonith-timeout" is "191"
+ And Parameter "pcmk_delay_max" configured in "stonith-sbd"
+    # since the SBD_DELAY_START value (161s) > the default systemd start timeout (1min 30s)
+ And Run "test -f /etc/systemd/system/sbd.service.d/sbd_delay_start.conf" OK
+ # 1.2*SBD_DELAY_START
+ And Run "grep 'TimeoutSec=193' /etc/systemd/system/sbd.service.d/sbd_delay_start.conf" OK
+
+ Given Has disk "/dev/sda1" on "hanode3"
+ Given Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ And Service "sbd" is "started" on "hanode3"
+ And SBD option "SBD_DELAY_START" value is "161"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "60"
+ And SBD option "msgwait" value for "/dev/sda1" is "120"
+ And Cluster property "stonith-timeout" is "191"
+ And Parameter "pcmk_delay_max" not configured in "stonith-sbd"
+ And Run "test -f /etc/systemd/system/sbd.service.d/sbd_delay_start.conf" OK
+ And Run "grep 'TimeoutSec=193' /etc/systemd/system/sbd.service.d/sbd_delay_start.conf" OK
+
+ When Run "crm cluster remove hanode3 -y" on "hanode1"
+ Then Cluster service is "stopped" on "hanode3"
+ And Service "sbd" is "stopped" on "hanode3"
+ And SBD option "SBD_DELAY_START" value is "161"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "60"
+ And SBD option "msgwait" value for "/dev/sda1" is "120"
+ And Cluster property "stonith-timeout" is "191"
+ And Parameter "pcmk_delay_max" configured in "stonith-sbd"
+ And Run "test -f /etc/systemd/system/sbd.service.d/sbd_delay_start.conf" OK
+ And Run "grep 'TimeoutSec=193' /etc/systemd/system/sbd.service.d/sbd_delay_start.conf" OK
+ When Run "sed -i 's/watchdog_timeout: 60/watchdog_timeout: 15/g' /etc/crm/profiles.yml" on "hanode1"
+
+ @clean
+ Scenario: Add sbd via stage on a running cluster
+ Given Run "test -f /etc/crm/profiles.yml" OK
+ Given Yaml "default:corosync.totem.token" value is "5000"
+ Given Yaml "default:sbd.watchdog_timeout" value is "15"
+
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+
+ When Run "crm cluster init sbd -s /dev/sda1 -y" on "hanode1"
+ Then Service "sbd" is "started" on "hanode1"
+ Then Service "sbd" is "started" on "hanode2"
+ And SBD option "SBD_DELAY_START" value is "71"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And SBD option "msgwait" value for "/dev/sda1" is "30"
+ And Cluster property "stonith-timeout" is "83"
+ And Parameter "pcmk_delay_max" configured in "stonith-sbd"
+
+ @clean
+ Scenario: Add disk-based sbd with qdevice
+ Given Run "test -f /etc/crm/profiles.yml" OK
+ Given Yaml "default:corosync.totem.token" value is "5000"
+ Given Yaml "default:sbd.watchdog_timeout" value is "15"
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+
+ When Run "crm cluster init -s /dev/sda1 --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+
+ And SBD option "SBD_DELAY_START" value is "41"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "15"
+ And SBD option "msgwait" value for "/dev/sda1" is "30"
+ And Cluster property "stonith-timeout" is "71"
+ And Parameter "pcmk_delay_max" not configured in "stonith-sbd"
+
+ @clean
+ Scenario: Add disk-less sbd with qdevice
+ Given Run "test -f /etc/crm/profiles.yml" OK
+ Given Yaml "default:corosync.totem.token" value is "5000"
+ Given Yaml "default:sbd.watchdog_timeout" value is "15"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+
+ When Run "crm cluster init -S --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+
+ And SBD option "SBD_DELAY_START" value is "81"
+ And SBD option "SBD_WATCHDOG_TIMEOUT" value is "35"
+ And Cluster property "stonith-timeout" is "95"
+ And Cluster property "stonith-watchdog-timeout" is "-1"
+
+ @clean
+ Scenario: Add and remove qdevice from cluster with sbd running
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+ And Parameter "pcmk_delay_max" configured in "stonith-sbd"
+ When Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Parameter "pcmk_delay_max" not configured in "stonith-sbd"
+ When Run "crm cluster remove --qdevice -y" on "hanode1"
+ Then Service "corosync-qdevice" is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode2"
+ And Parameter "pcmk_delay_max" configured in "stonith-sbd"
+
+ @clean
+ Scenario: Test priority-fence-delay and priority
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Property "priority" in "rsc_defaults" is "1"
+ When Run "crm cluster remove hanode2 -y" on "hanode1"
+ Then Cluster service is "stopped" on "hanode2"
+ And Property "priority" in "rsc_defaults" is "0"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Property "priority" in "rsc_defaults" is "1"
+ When Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Property "priority" in "rsc_defaults" is "0"
+ When Run "crm cluster remove --qdevice -y" on "hanode1"
+ Then Service "corosync-qdevice" is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode2"
+ And Property "priority" in "rsc_defaults" is "1"
+ When Run "crm cluster init sbd -s /dev/sda1 -y" on "hanode1"
+ Then Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+ And Parameter "pcmk_delay_max" configured in "stonith-sbd"
+ And Cluster property "stonith-timeout" is "83"
+ And Cluster property "priority-fencing-delay" is "60"
+ When Run "crm cluster remove hanode2 -y" on "hanode1"
+ Then Cluster service is "stopped" on "hanode2"
+ And Property "priority" in "rsc_defaults" is "0"
+ And Cluster property "priority-fencing-delay" is "0"
+ And Parameter "pcmk_delay_max" not configured in "stonith-sbd"
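
The timeout comments in this feature can be checked by hand; a
sketch of the disk-based arithmetic for the small-watchdog case,
assuming corosync's default consensus of 1.2*token and the 30s
pcmk_delay_max the scenario expects:

    token=5 consensus=6           # 5000ms token; consensus = 1.2*token
    msgwait=30 pcmk_delay_max=30  # msgwait = 2 * watchdog_timeout (15)
    echo $((token + consensus + pcmk_delay_max + msgwait))   # 71
    # stonith-timeout: 1.2*(pcmk_delay_max+msgwait) = 72 > 60 default
    echo $((72 + token + consensus))                         # 83
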
diff --git a/test/features/bootstrap_sbd_normal.feature b/test/features/bootstrap_sbd_normal.feature
new file mode 100644
index 0000000..8c5d421
--- /dev/null
+++ b/test/features/bootstrap_sbd_normal.feature
@@ -0,0 +1,272 @@
+@sbd
+Feature: crmsh bootstrap sbd management
+
+  Tag @clean means the cluster service needs to be stopped if the service is available
+
+ @clean
+ Scenario: Verify sbd device
+ When Try "crm cluster init -s "/dev/sda1;/dev/sda2;/dev/sda3;/dev/sda4" -y"
+ Then Except "ERROR: cluster.init: Maximum number of SBD device is 3"
+ When Try "crm cluster init -s "/dev/sda1;/dev/sdaxxxx" -y"
+ Then Except "ERROR: cluster.init: /dev/sdaxxxx doesn't look like a block device"
+ When Try "crm cluster init -s "/dev/sda1;/dev/sda1" -y"
+ Then Except multiple lines
+ """
+ usage: init [options] [STAGE]
+ crm: error: Duplicated input for '-s/--sbd-device' option
+ """
+
+ @clean
+ Scenario: Setup sbd with init and join process(bsc#1170999)
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+
+ @clean
+ Scenario: Re-setup cluster without sbd(bsc#1166967)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "stopped" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "stopped" on "hanode2"
+ And Resource "stonith:external/sbd" not configured
+
+ @clean
+ Scenario: Configure diskless sbd(bsc#1181907)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -S -y" on "hanode1"
+ Then Expected "Diskless SBD requires cluster with three or more nodes." in stderr
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Expected "Diskless SBD requires cluster with three or more nodes." in stderr
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Expected "Diskless SBD requires cluster with three or more nodes." not in stderr
+ Then Cluster service is "started" on "hanode3"
+ And Service "sbd" is "started" on "hanode3"
+ And Resource "stonith:external/sbd" not configured
+
+ @clean
+  Scenario: Configure multi-disk sbd
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Has disk "/dev/sda2" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Has disk "/dev/sda2" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -s /dev/sda2 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+
+ @clean
+ Scenario: Configure sbd in several stages(bsc#1175057)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init ssh -y" on "hanode1"
+ And Run "crm cluster init csync2 -y" on "hanode1"
+ And Run "crm cluster init corosync -y" on "hanode1"
+ And Run "crm cluster init sbd -s /dev/sda1 -y" on "hanode1"
+ And Run "crm cluster init cluster -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ When Run "crm cluster join ssh -y -c hanode1" on "hanode2"
+ And Run "crm cluster join csync2 -y -c hanode1" on "hanode2"
+ And Run "crm cluster join ssh_merge -y -c hanode1" on "hanode2"
+ And Run "crm cluster join cluster -y -c hanode1" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+
+ @clean
+ Scenario: Configure diskless sbd in several stages(bsc#1175057)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init ssh -y" on "hanode1"
+ And Run "crm cluster init csync2 -y" on "hanode1"
+ And Run "crm cluster init corosync -y" on "hanode1"
+ And Run "crm cluster init sbd -S -y" on "hanode1"
+ And Run "crm cluster init cluster -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ When Run "crm cluster join ssh -y -c hanode1" on "hanode2"
+ And Run "crm cluster join csync2 -y -c hanode1" on "hanode2"
+ And Run "crm cluster join ssh_merge -y -c hanode1" on "hanode2"
+ And Run "crm cluster join cluster -y -c hanode1" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith:external/sbd" not configured
+
+ @clean
+ Scenario: Configure sbd on running cluster via stage(bsc#1181906)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ When Run "crm cluster init sbd -s /dev/sda1 -y" on "hanode1"
+ Then Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+
+ @clean
+ Scenario: Configure sbd on running cluster via stage with ra running(bsc#1181906)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ When Run "crm configure primitive d Dummy op monitor interval=3s" on "hanode1"
+ When Run "crm cluster init sbd -s /dev/sda1 -y" on "hanode1"
+ Then Expected "WARNING: To start sbd.service, need to restart cluster service manually on each node" in stderr
+ Then Service "sbd" is "stopped" on "hanode1"
+ And Service "sbd" is "stopped" on "hanode2"
+ When Run "crm cluster restart" on "hanode1"
+ Then Service "sbd" is "started" on "hanode1"
+ When Run "crm cluster restart" on "hanode2"
+ Then Service "sbd" is "started" on "hanode2"
+ When Run "sleep 20" on "hanode1"
+ Then Resource "stonith-sbd" type "external/sbd" is "Started"
+
+ @clean
+ Scenario: Configure sbd when no watchdog device(bsc#1154927, bsc#1178869)
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Try "lsmod |grep softdog && rmmod softdog" on "hanode1"
+ And Try "lsmod |grep softdog && rmmod softdog" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -w softdog -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ When Try "lsmod |grep softdog"
+ Then Expected return code is "0"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+
+ @clean
+ Scenario: Setup sbd and test fence node
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ When Run "stonith_admin -H hanode2 -c" on "hanode1"
+ When Run "crm -F node fence hanode2" on "hanode1"
+ Then Expected return code is "0"
+ Then Node "hanode2" is UNCLEAN
+ Then Wait "60" seconds for "hanode2" successfully fenced
+
+ @skip_non_root
+ @clean
+ Scenario: Setup sbd and test fence node, use hacluster to fence
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ When Run "stonith_admin -H hanode2 -c" on "hanode1"
+ When Run "su hacluster -c '/usr/sbin/crm -F node fence hanode2'" on "hanode1"
+ Then Expected return code is "0"
+ Then Node "hanode2" is UNCLEAN
+ Then Wait "60" seconds for "hanode2" successfully fenced
+
+ @clean
+  Scenario: Change an existing disk-based sbd cluster to diskless sbd
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
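+    # each disk-based sbd instance runs a "watcher: <device>" process, so the
+    # ps checks below confirm which device sbd is currently watching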
+ And Run "ps -ef|grep -v grep|grep 'watcher: /dev/sda1 '" OK
+
+ When Run "crm -F cluster init sbd -S -y" on "hanode1"
+ Then Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith:external/sbd" not configured
+ When Try "ps -ef|grep -v grep|grep 'watcher: /dev/sda1 '"
+ Then Expected return code is "1"
+
+ @clean
+  Scenario: Change an existing diskless sbd cluster to disk-based sbd
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -S -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith:external/sbd" not configured
+
+ When Run "crm -F cluster init sbd -s /dev/sda1 -y" on "hanode1"
+ Then Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ And Run "ps -ef|grep -v grep|grep 'watcher: /dev/sda1 '" OK
+
+ @clean
+ Scenario: Change sbd device
+ Given Has disk "/dev/sda1" on "hanode1"
+ Given Has disk "/dev/sda2" on "hanode1"
+ Given Cluster service is "stopped" on "hanode1"
+ Given Has disk "/dev/sda1" on "hanode2"
+ Given Has disk "/dev/sda2" on "hanode2"
+ Given Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ And Run "ps -ef|grep -v grep|grep 'watcher: /dev/sda1 '" OK
+
+ When Run "crm -F cluster init sbd -s /dev/sda2 -y" on "hanode1"
+ Then Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ And Run "ps -ef|grep -v grep|grep 'watcher: /dev/sda2 '" OK
+ When Try "ps -ef|grep -v grep|grep 'watcher: /dev/sda1 '"
+ Then Expected return code is "1"
diff --git a/test/features/cluster_api.feature b/test/features/cluster_api.feature
new file mode 100644
index 0000000..b8676be
--- /dev/null
+++ b/test/features/cluster_api.feature
@@ -0,0 +1,143 @@
+@cluster_api
+Feature: Functional test to cover SAP clusterAPI
+
+  To avoid possible regressions on the crmsh side when adapting SAP applications
+  Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2
+
+  Background: Set up a two-node cluster
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ When Run "crm configure primitive d Dummy" on "hanode1"
+ And Wait "3" seconds
+ Then Resource "d" type "Dummy" is "Started"
+ And Show cluster status on "hanode1"
+ When Run "echo 'export PATH=$PATH:/usr/sbin/' > ~hacluster/.bashrc" on "hanode1"
+ When Run "echo 'export PATH=$PATH:/usr/sbin/' > ~hacluster/.bashrc" on "hanode2"
+
+ @clean
+ Scenario: Start and stop resource by hacluster
+ When Run "su - hacluster -c 'crm resource stop d'" on "hanode1"
+ Then Expected return code is "0"
+ When Wait "3" seconds
+ Then Resource "d" type "Dummy" is "Stopped"
+ And Show cluster status on "hanode1"
+ When Run "su - hacluster -c 'crm resource start d'" on "hanode1"
+ Then Expected return code is "0"
+ When Wait "3" seconds
+ Then Resource "d" type "Dummy" is "Started"
+ And Show cluster status on "hanode1"
+
+ @clean
+ Scenario: Resource move by hacluster
+ Given Resource "d" is started on "hanode1"
+ # move <res> <node>
+ When Run "su - hacluster -c 'crm resource move d hanode2'" on "hanode1"
+ Then Expected return code is "0"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode2"
+ When Run "su - hacluster -c 'crm resource clear d'" on "hanode1"
+ Then Expected return code is "0"
+
+ # move <res> <node> force
+ When Run "su - hacluster -c 'crm resource move d hanode1'" on "hanode1"
+ Then Expected return code is "0"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode1"
+ When Run "su - hacluster -c 'crm resource clear d'" on "hanode1"
+ Then Expected return code is "0"
+
+ # move <res> force
+ When Run "su - hacluster -c 'crm resource move d force'" on "hanode1"
+ Then Expected return code is "0"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode2"
+ When Run "su - hacluster -c 'crm resource clear d'" on "hanode1"
+ Then Expected return code is "0"
+
+ # move <res> <lifetime> force
+ When Run "su - hacluster -c 'crm resource move d PT5M force'" on "hanode1"
+ Then Expected return code is "0"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode1"
+ When Run "su - hacluster -c 'crm resource clear d'" on "hanode1"
+ Then Expected return code is "0"
+
+ # move <res> <node> <lifetime>
+ When Run "su - hacluster -c 'crm resource move d hanode2 PT5M'" on "hanode1"
+ Then Expected return code is "0"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode2"
+ When Run "su - hacluster -c 'crm resource clear d'" on "hanode1"
+ Then Expected return code is "0"
+
+ # move <res> <node> <lifetime> force
+ When Run "su - hacluster -c 'crm resource move d hanode1 PT5M force'" on "hanode1"
+ Then Expected return code is "0"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode1"
+ When Run "su - hacluster -c 'crm resource clear d'" on "hanode1"
+ Then Expected return code is "0"
+
+ When Try "crm resource move d hanode2 PT5M force xxx"
+ Then Except "ERROR: resource.move: usage: move <rsc> [<node>] [<lifetime>] [force]"
+ When Try "crm resource move d hanode2 PT5M forcd"
+ Then Except "ERROR: resource.move: usage: move <rsc> [<node>] [<lifetime>] [force]"
+ When Try "crm resource move d xxxx PT5M force"
+ Then Except "ERROR: resource.move: Not our node: xxxx"
+ When Try "crm resource move d"
+ Then Except "ERROR: resource.move: No target node: Move requires either a target node or 'force'"
+
+ @clean
+ Scenario: Run "crm configure show" by hacluster
+ When Run "crm configure primitive d2 Dummy op monitor interval=10s timeout=20s on-fail=restart params fake=test meta resource-stickiness=5000" on "hanode1"
+ And Run "crm configure group g d2 meta resource-stickiness=3000" on "hanode1"
+ And Wait "3" seconds
+ Then Resource "d2" type "Dummy" is "Started"
+ And Show cluster status on "hanode1"
+ When Run "su - hacluster -c 'crm configure show'" on "hanode1"
+ Then Expected return code is "0"
+ And Expected multiple lines in output
+ """
+ primitive d2 Dummy \
+ params fake=test \
+ meta resource-stickiness=5000 \
+ op monitor interval=10s timeout=20s on-fail=restart \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+ group g d2 \
+ meta resource-stickiness=3000
+ """
+
+ @clean
+ Scenario: pacemaker ACL related operations by hacluster
+ When Run "su - hacluster -c 'crm configure primitive d2 Dummy'" on "hanode1"
+ And Wait "3" seconds
+ Then Resource "d2" type "Dummy" is "Started"
+ When Run "su - hacluster -c 'crm maintenance on'" on "hanode1"
+ When Run "crm_mon -1" on "hanode1"
+ Then Expected "Resource management is DISABLED" in stdout
+ When Run "su - hacluster -c 'crm maintenance off'" on "hanode1"
+ When Run "crm_mon -1" on "hanode1"
+ Then Expected "Resource management is DISABLED" not in stdout
+ When Run "su - hacluster -c 'crm node standby hanode2'" on "hanode1"
+ Then Node "hanode2" is standby
+ When Run "su - hacluster -c 'crm node online hanode2'" on "hanode1"
+ Then Node "hanode2" is online
+ When Run "su - hacluster -c 'crm ra providers Dummy'" on "hanode1"
+ Then Expected "heartbeat pacemaker" in stdout
+ When Run "su - hacluster -c 'crm status'" on "hanode1"
+ Then Expected "Online: [ hanode1 hanode2 ]" in stdout
+ When Run "su - hacluster -c '/usr/sbin/crm report /tmp/report'" on "hanode1"
+ Then No crmsh tracebacks
+ Then File "/tmp/report.tar.bz2" exists on "hanode1"
+ And Directory "hanode1" in "/tmp/report.tar.bz2"
+ And Directory "hanode2" in "/tmp/report.tar.bz2"
+ And File "pacemaker.log" in "/tmp/report.tar.bz2"
+ And File "corosync.conf" in "/tmp/report.tar.bz2"
diff --git a/test/features/configure_bugs.feature b/test/features/configure_bugs.feature
new file mode 100644
index 0000000..7b1222d
--- /dev/null
+++ b/test/features/configure_bugs.feature
@@ -0,0 +1,40 @@
+@configure
+Feature: Functional test for the configure sublevel
+
+  Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2
+
+ @clean
+ Scenario: Replace sensitive data by default(bsc#1163581)
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+
+ # mask password by default
+ When Run "crm node utilization hanode1 set password=qwertyui" on "hanode1"
+ When Try "crm configure show|grep password|grep qwertyui"
+ Then Expected return code is "1"
+ When Run "crm node utilization hanode2 set password testingpass" on "hanode1"
+ When Try "crm configure show|grep password|grep testingpass"
+ Then Expected return code is "1"
+ And Show crm configure
+
+ # mask password and ip address
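+    # the sed calls below uncomment the [core] section and widen obscure_pattern,
+    # which controls the values that "crm configure show" masks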
+ When Run "crm configure primitive ip2 IPaddr2 params ip=@vip.0" on "hanode1"
+ And Run "sed -i 's/; \[core\]/[core]/' /etc/crm/crm.conf" on "hanode1"
+ And Run "sed -i 's/; obscure_pattern = .*$/obscure_pattern = passw*|ip/g' /etc/crm/crm.conf" on "hanode1"
+ And Try "crm configure show|grep -E "@vip.0|qwertyui""
+ Then Expected return code is "1"
+ And Show crm configure
+
+ # mask password and ip address with another pattern
+ When Run "sed -i 's/obscure_pattern = .*$/obscure_pattern = passw* ip/g' /etc/crm/crm.conf" on "hanode1"
+ And Try "crm configure show|grep -E "@vip.0|qwertyui""
+ Then Expected return code is "1"
+ And Show crm configure
diff --git a/test/features/constraints_bugs.feature b/test/features/constraints_bugs.feature
new file mode 100644
index 0000000..c1174d5
--- /dev/null
+++ b/test/features/constraints_bugs.feature
@@ -0,0 +1,24 @@
+@constraints
+Feature: Verify constraints (order/colocation/location) bugs
+
+  Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2
+
+  Background: Set up a two-node cluster
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Show cluster status on "hanode1"
+
+ @clean
+ Scenario: Convert score to kind for rsc_order(bsc#1122391)
+ When Run "crm configure primitive d1 Dummy op monitor interval=10s" on "hanode1"
+ And Run "crm configure primitive d2 Dummy op monitor interval=10s" on "hanode1"
+ And Run "crm configure order o1 100: d1 d2" on "hanode1"
+ When Run "crm configure show" on "hanode1"
+ Then Expected "order o1 Mandatory: d1 d2" in stdout
diff --git a/test/features/coveragerc b/test/features/coveragerc
new file mode 100644
index 0000000..cb0403e
--- /dev/null
+++ b/test/features/coveragerc
@@ -0,0 +1,4 @@
+[run]
+data_file = /.coverage
+parallel = True
+source_pkgs = crmsh
diff --git a/test/features/crm_report_bugs.feature b/test/features/crm_report_bugs.feature
new file mode 100644
index 0000000..58d158b
--- /dev/null
+++ b/test/features/crm_report_bugs.feature
@@ -0,0 +1,166 @@
+@crm_report
+Feature: crm report functional test for verifying bugs
+
+  Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2 hanode3
+
+  Background: Set up a two-node cluster
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Show cluster status on "hanode1"
+
+ @clean
+  Scenario: Verify crm report handles files containing non-UTF-8 characters (bsc#1130715)
+ When Run "echo 'abc#$%%^' | iconv -f UTF-8 -t UTF-16 > /opt/text_non_utf8" on "hanode1"
+ Then This file "/opt/text_non_utf8" will trigger UnicodeDecodeError exception
+ When Run "crm report -E /opt/text_non_utf8 report1" on "hanode1"
+ Then File "text_non_utf8" in "report1.tar.bz2"
+ When Run "rm -f report1.tar.bz2" on "hanode1"
+
+ @clean
+ Scenario: Compressed file ended before the end-of-stream marker was reached (bsc#1206606)
+ When Run "touch /var/log/pacemaker/pacemaker.log-20221220.xz" on "hanode1"
+ When Try "crm report report1" on "hanode1"
+ Then File "pacemaker.log" in "report1.tar.bz2"
+ And Expected "When reading file "/var/log/pacemaker/pacemaker.log-20221220.xz": Compressed file ended before the end-of-stream marker was reached" in stderr
+ When Run "rm -f report1.tar.bz2" on "hanode1"
+
+ @clean
+ Scenario: Include archived logs(bsc#1148873)
+ # For syslog
+ When Write multi lines to file "/var/log/log1" on "hanode1"
+ """
+ Sep 08 08:36:34 node1 log message line1
+ Sep 08 08:37:01 node1 log message line2
+ Sep 08 08:37:02 node1 log message line3
+ """
+ And Run "xz /var/log/log1" on "hanode1"
+ # bsc#1218491, unable to gather log files that are in the syslog format
+ And Run "touch -m -t 202201010000 /var/log/log1.xz" on "hanode1"
+ When Write multi lines to file "/var/log/log1" on "hanode1"
+ """
+ Sep 08 09:37:02 node1 log message line4
+ Sep 08 09:37:12 node1 log message line5
+ """
+ # bsc#1218491, unable to gather log files that are in the syslog format
+ And Run "touch -m -t 202201010001 /var/log/log1" on "hanode1"
+ And Run "crm report -f 20200901 -E /var/log/log1 report1" on "hanode1"
+ Then File "log1" in "report1.tar.bz2"
+ When Run "tar jxf report1.tar.bz2" on "hanode1"
+ And Run "cat report1/hanode1/log1" on "hanode1"
+ Then Expected multiple lines in output
+ """
+ Sep 08 08:36:34 node1 log message line1
+ Sep 08 08:37:01 node1 log message line2
+ Sep 08 08:37:02 node1 log message line3
+ Sep 08 09:37:02 node1 log message line4
+ Sep 08 09:37:12 node1 log message line5
+ """
+    When Run "rm -rf report1.tar.bz2 report1" on "hanode1"
+
+ # For rfc5424
+ When Write multi lines to file "/var/log/log2" on "hanode1"
+ """
+ 2022-09-08T14:24:36.003Z mymachine.example.com myapp - ID47
+ 2022-09-08T14:25:15.003Z mymachine.example.com myapp - ID48
+ 2022-09-08T14:26:15.003Z mymachine.example.com myapp - ID49
+ """
+ And Run "xz /var/log/log2" on "hanode1"
+ When Write multi lines to file "/var/log/log2" on "hanode1"
+ """
+ 2022-09-08T14:27:15.003Z mymachine.example.com myapp - ID50
+ 2022-09-08T14:28:15.003Z mymachine.example.com myapp - ID51
+ """
+ And Run "crm report -f 20200901 -E /var/log/log2 report1" on "hanode1"
+ Then File "log2" in "report1.tar.bz2"
+ When Run "tar jxf report1.tar.bz2" on "hanode1"
+ And Run "cat report1/hanode1/log2" on "hanode1"
+ Then Expected multiple lines in output
+ """
+ 2022-09-08T14:24:36.003Z mymachine.example.com myapp - ID47
+ 2022-09-08T14:25:15.003Z mymachine.example.com myapp - ID48
+ 2022-09-08T14:26:15.003Z mymachine.example.com myapp - ID49
+ 2022-09-08T14:27:15.003Z mymachine.example.com myapp - ID50
+ 2022-09-08T14:28:15.003Z mymachine.example.com myapp - ID51
+ """
+    When Run "rm -rf report1.tar.bz2 report1" on "hanode1"
+
+ @clean
+ Scenario: Collect corosync.log(bsc#1148874)
+ When Run "sed -i 's/\(\s*to_logfile:\s*\).*/\1no/' /etc/corosync/corosync.conf" on "hanode1"
+ When Run "sed -i 's/\(\s*to_logfile:\s*\).*/\1no/' /etc/corosync/corosync.conf" on "hanode2"
+ And Run "corosync-cfgtool -R" on "hanode1"
+ And Run "rm -f /var/log/cluster/corosync.log" on "hanode1"
+ And Run "rm -f /var/log/cluster/corosync.log" on "hanode2"
+ And Run "crm cluster stop --all" on "hanode1"
+ And Run "crm cluster start --all" on "hanode1"
+ And Run "sleep 15" on "hanode1"
+
+ And Run "crm report report" on "hanode1"
+ And Run "tar jxf report.tar.bz2" on "hanode1"
+ Then File "corosync.log" not in "report.tar.bz2"
+    When Run "rm -rf report.tar.bz2 report" on "hanode1"
+
+ When Run "sed -i 's/\(\s*to_logfile:\s*\).*/\1yes/' /etc/corosync/corosync.conf" on "hanode1"
+ When Run "sed -i 's/\(\s*to_logfile:\s*\).*/\1yes/' /etc/corosync/corosync.conf" on "hanode2"
+ And Run "crm cluster stop --all" on "hanode1"
+ And Run "crm cluster start --all" on "hanode1"
+ And Run "sleep 15" on "hanode1"
+
+ And Run "crm report report" on "hanode1"
+ And Run "tar jxf report.tar.bz2" on "hanode1"
+ Then File "corosync.log" in "report.tar.bz2"
+ When Run "rm -rf report.tar.bz2 report" on "hanode1"
+
+ @clean
+ Scenario: Replace sensitive data(bsc#1163581)
+ # Set sensitive data TEL and password
+ When Run "crm node utilization hanode1 set TEL 13356789876" on "hanode1"
+ When Run "crm node utilization hanode1 set password qwertyui" on "hanode1"
+ When Run "crm report report" on "hanode1"
+ When Run "tar jxf report.tar.bz2" on "hanode1"
+ And Try "grep -R 'qwertyui' report"
+    # crm report masks passw.* by default,
+    # so no password should be found here
+ Then Expected return code is "1"
+ When Run "rm -rf report.tar.bz2 report" on "hanode1"
+
+ # mask password and ip address by using crm.conf
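+    # the sed calls below enable the [report] section and set sanitize_rule;
+    # the ":raw" modifier sanitizes matched values wherever they appear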
+ When Run "crm configure primitive ip2 IPaddr2 params ip=@vip.0" on "hanode1"
+ And Run "sed -i 's/; \[report\]/[report]/' /etc/crm/crm.conf" on "hanode1"
+ And Run "sed -i 's/; sanitize_rule = .*$/sanitize_rule = passw.*|ip.*:raw/g' /etc/crm/crm.conf" on "hanode1"
+ And Run "crm report report" on "hanode1"
+ And Run "tar jxf report.tar.bz2" on "hanode1"
+ And Try "grep -R -E '@vip.0|qwertyui' report"
+ # No password here
+ Then Expected return code is "1"
+ When Run "rm -rf report.tar.bz2 report" on "hanode1"
+
+    # Run the sanitize job, also covering TEL
+ When Run "crm report -s -p TEL report" on "hanode1"
+ When Run "tar jxf report.tar.bz2" on "hanode1"
+ And Try "grep -R 'qwertyui' report"
+ # No password here
+ Then Expected return code is "1"
+ When Try "grep -R '13356789876' report"
+ # No TEL number here
+ Then Expected return code is "1"
+ When Run "rm -rf report.tar.bz2 report" on "hanode1"
+
+ # disable sanitize
+ When Run "sed -i 's/; \[report\]/[report]/' /etc/crm/crm.conf" on "hanode1"
+ And Run "sed -i 's/sanitize_rule = .*$/sanitize_rule = /g' /etc/crm/crm.conf" on "hanode1"
+ When Run "crm report report" on "hanode1"
+ When Run "tar jxf report.tar.bz2" on "hanode1"
+ And Try "grep -R 'qwertyui' report"
+ # found password
+ Then Expected return code is "0"
+ When Run "rm -rf report.tar.bz2 report" on "hanode1"
diff --git a/test/features/crm_report_normal.feature b/test/features/crm_report_normal.feature
new file mode 100644
index 0000000..00a1f2b
--- /dev/null
+++ b/test/features/crm_report_normal.feature
@@ -0,0 +1,109 @@
+@crm_report
+Feature: crm report functional test for common cases
+
+  Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2 hanode3
+
+  Background: Set up a two-node cluster
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Show cluster status on "hanode1"
+
+ @clean
+  Scenario: crm report collects trace RA logs
+ When Run "crm configure primitive d Dummy" on "hanode1"
+ And Run "crm configure primitive d2 Dummy" on "hanode1"
+ Then Resource "d" is started on "hanode1"
+ And Resource "d2" is started on "hanode2"
+ When Run "crm resource trace d monitor" on "hanode1"
+ Then Expected "Trace for d:monitor is written to /var/lib/heartbeat/trace_ra/Dummy" in stdout
+ When Wait "10" seconds
+ And Run "crm resource untrace d" on "hanode1"
+ And Run "crm resource trace d2 monitor /trace_d" on "hanode1"
+ Then Expected "Trace for d2:monitor is written to /trace_d/Dummy" in stdout
+ When Wait "10" seconds
+ And Run "crm resource untrace d2" on "hanode1"
+ And Run "crm report report" on "hanode1"
+ Then No crmsh tracebacks
+ Then Directory "trace_ra" in "report.tar.bz2"
+ And Directory "trace_d" in "report.tar.bz2"
+ When Run "rm -rf report.tar.bz2 report" on "hanode1"
+
+ @clean
+ Scenario: Run history and script
+ When Run "crm history info" on "hanode1"
+ When Run "crm history refresh" on "hanode1"
+ When Try "crm history peinputs|grep "pengine/pe-input-0""
+ Then Expected return code is "0"
+ When Try "crm history info|grep "Nodes: hanode1 hanode2""
+ Then Expected return code is "0"
+ When Run "crm configure primitive d100 Dummy" on "hanode1"
+ When Run "crm history refresh force" on "hanode1"
+ When Try "crm history info|grep "Resources: d100""
+ Then Expected return code is "0"
+ Given Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ When Run "crm history refresh force" on "hanode1"
+ When Try "crm history info|grep "Nodes: hanode1 hanode2 hanode3""
+ Then Expected return code is "0"
+ When Run "crm script run health" on "hanode1"
+ When Run "crm script run virtual-ip id=vip_x ip=@vip.0" on "hanode1"
+ Then Resource "vip_x" type "IPaddr2" is "Started"
+
+ @clean
+ Scenario: Common tests
+ When Run "crm report -h" on "hanode1"
+
+ When Try "crm report "*s"" on "hanode1"
+ Then Expected "*s is invalid file name" in stderr
+
+ When Try "crm report /fsf/report" on "hanode1"
+ Then Expected "Directory /fsf does not exist" in stderr
+
+ When Try "crm report -n fs" on "hanode1"
+ Then Expected "host "fs" is unreachable:" in stderr
+
+ When Try "crm report -f xxxx" on "hanode1"
+ Then Expected "Invalid time string 'xxxx'" in stderr
+
+ When Try "crm report -f 1d -t 2d" on "hanode1"
+ Then Expected "The start time must be before the finish time" in stderr
+
+ When Run "crm -d report -S -d /tmp/report" on "hanode1"
+ Then Directory "/tmp/report/hanode1" created
+ Then Directory "/tmp/report/hanode2" not created
+ When Run "rm -rf /tmp/report" on "hanode1"
+
+ When Run "crm report -vv" on "hanode1"
+ Then Default crm_report tar file created
+ When Remove default crm_report tar file
+
+ When Run "crm report -d /tmp/report" on "hanode1"
+ Then Directory "/tmp/report" created
+ When Try "crm report -d /tmp/report" on "hanode1"
+ Then Expected "Destination directory /tmp/report exists, please cleanup or use -Z option" in stderr
+ When Run "crm report -d -Z /tmp/report" on "hanode1"
+ Then Directory "/tmp/report" created
+
+ When Run "mv /etc/corosync/corosync.conf /etc/corosync/corosync.bak" on "hanode1"
+ When Try "crm report" on "hanode1"
+ Then Expected "File /etc/corosync/corosync.conf does not exist" in stderr
+ When Run "mv /etc/corosync/corosync.bak /etc/corosync/corosync.conf" on "hanode1"
+
+ When Run "mv /var/lib/pacemaker/pengine /var/lib/pacemaker/pengine_bak" on "hanode1"
+ When Try "crm report" on "hanode1"
+ Then Expected "Cannot find PE directory" in stderr
+ When Run "mv /var/lib/pacemaker/pengine_bak /var/lib/pacemaker/pengine" on "hanode1"
+
+ When Run "crm cluster stop --all" on "hanode1"
+ When Run "rm -f /var/lib/pacemaker/cib/cib*" on "hanode1"
+ When Run "rm -f /var/lib/pacemaker/cib/cib*" on "hanode2"
+ When Try "crm report" on "hanode1"
+ Then Expected "Could not figure out a list of nodes; is this a cluster node" in stderr
diff --git a/test/features/environment.py b/test/features/environment.py
new file mode 100644
index 0000000..61d2ac2
--- /dev/null
+++ b/test/features/environment.py
@@ -0,0 +1,62 @@
+import logging
+import re
+import subprocess
+import time
+
+import crmsh.userdir
+import crmsh.utils
+from crmsh.sh import ShellUtils
+
+
+def get_online_nodes():
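+    # parse 'crm_node -l' output lines like "1084752129 hanode1 member"
+    # into a list of member node names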
+ _, out, _ = ShellUtils().get_stdout_stderr('sudo crm_node -l')
+ if out:
+ return re.findall(r'[0-9]+ (.*) member', out)
+ else:
+ return None
+
+
+def resource_cleanup():
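+    # clean up all resource failures cluster-wide so leftovers from a
+    # previous scenario do not affect the next one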
+ subprocess.run(
+ ['sudo', 'crm', 'resource', 'cleanup'],
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+
+
+def before_step(context, step):
+ context.logger = logging.getLogger("Step:{}".format(step.name))
+
+
+def before_tag(context, tag):
+    # tag @clean means the cluster service needs to be stopped
+ if tag == "clean":
+ time.sleep(3)
+ online_nodes = get_online_nodes()
+ if online_nodes:
+ resource_cleanup()
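+            # wait until a DC has been elected; 'crmadmin -D' prints a line
+            # starting with "Designated" once the election settles, so the
+            # 'crm cluster stop --all' below runs against a stable cluster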
+ while True:
+ time.sleep(1)
+ rc, stdout, _ = ShellUtils().get_stdout_stderr('sudo crmadmin -D -t 1')
+ if rc == 0 and stdout.startswith('Designated'):
+ break
+ subprocess.call(
+ ['sudo', 'crm', 'cluster', 'stop', '--all'],
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+ if tag == "skip_non_root":
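+        # skip scenarios tagged @skip_non_root when tests are driven through
+        # a non-root sudoer instead of a real root session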
+ sudoer = crmsh.userdir.get_sudoer()
+ if sudoer or crmsh.userdir.getuser() != 'root':
+ context.scenario.skip()
diff --git a/test/features/geo_setup.feature b/test/features/geo_setup.feature
new file mode 100644
index 0000000..b26b04e
--- /dev/null
+++ b/test/features/geo_setup.feature
@@ -0,0 +1,29 @@
+@geo
+Feature: geo cluster
+
+ Test geo cluster setup using bootstrap
+  Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2 hanode3
+
+ @clean
+ Scenario: GEO cluster setup
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "crm cluster init -y -n cluster1" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm configure primitive vip IPaddr2 params ip=@vip.0" on "hanode1"
+
+ When Run "crm cluster init -y -n cluster2" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm configure primitive vip IPaddr2 params ip=@vip.1" on "hanode2"
+
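+    # geo_init creates the booth configuration for the listed clusters and
+    # tickets; geo_join fetches that configuration from an existing cluster node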
+ When Run "crm cluster geo_init -y --clusters "cluster1=@vip.0 cluster2=@vip.1" --tickets tickets-geo --arbitrator hanode3" on "hanode1"
+ When Run "crm cluster geo_join -y --cluster-node hanode1 --clusters "cluster1=@vip.0 cluster2=@vip.1"" on "hanode2"
+
+ Given Service "booth@booth" is "stopped" on "hanode3"
+ When Run "crm cluster geo_init_arbitrator -y --cluster-node hanode1" on "hanode3"
+ Then Service "booth@booth" is "started" on "hanode3"
+ When Run "crm resource start g-booth" on "hanode1"
+ Then Show cluster status on "hanode1"
+ When Run "crm resource start g-booth" on "hanode2"
+ Then Show cluster status on "hanode2"
diff --git a/test/features/healthcheck.feature b/test/features/healthcheck.feature
new file mode 100644
index 0000000..da7f78a
--- /dev/null
+++ b/test/features/healthcheck.feature
@@ -0,0 +1,37 @@
+@healthcheck
+Feature: healthcheck detects and fixes problems in a crmsh deployment
+
+  Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2 hanode3
+
+  Background: Set up a two-node cluster
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ And Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Show cluster status on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Show cluster status on "hanode1"
+
+ @clean
+  Scenario: A new node joins when directory ~hacluster/.ssh is removed from the cluster
+ When Run "rm -rf ~hacluster/.ssh" on "hanode1"
+ And Run "rm -rf ~hacluster/.ssh" on "hanode2"
+ And Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+    # FIXME: the new join implementation no longer triggers an exception, so the auto fix is not applied
+ # And File "~hacluster/.ssh/id_rsa" exists on "hanode1"
+ # And File "~hacluster/.ssh/id_rsa" exists on "hanode2"
+ # And File "~hacluster/.ssh/id_rsa" exists on "hanode3"
+
+  # Skip non-root runs: behave_agent cannot run commands interactively as a non-root sudoer
+ @skip_non_root
+ @clean
+ Scenario: An upgrade_seq file in ~hacluster/crmsh/ will be migrated to /var/lib/crmsh (bsc#1213050)
+ When Run "mv /var/lib/crmsh ~hacluster/" on "hanode1"
+ Then File "~hacluster/crmsh/upgrade_seq" exists on "hanode1"
+ When Run "crm cluster status" on "hanode1"
+ Then File "/var/lib/crmsh/upgrade_seq" exists on "hanode1"
diff --git a/test/features/ocfs2.feature b/test/features/ocfs2.feature
new file mode 100644
index 0000000..29b4b1a
--- /dev/null
+++ b/test/features/ocfs2.feature
@@ -0,0 +1,61 @@
+@ocfs2
+Feature: OCFS2 configuration/verification using bootstrap
+
+@clean
+Scenario: Configure ocfs2 along with init process
+ Given Has disk "/dev/sda1" on "hanode1"
+ And Has disk "/dev/sda2" on "hanode1"
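+  # "-s" selects the sbd device; "-o" selects the device to format as ocfs2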
+ When Run "crm cluster init -s /dev/sda1 -o /dev/sda2 -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ And Resource "ocfs2-dlm" type "pacemaker:controld" is "Started"
+ And Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started"
+
+@clean
+Scenario: Configure cluster lvm2 + ocfs2 with init process
+ Given Has disk "/dev/sda1" on "hanode1"
+ And Has disk "/dev/sda2" on "hanode1"
+ And Has disk "/dev/sda3" on "hanode1"
+ When Run "crm cluster init -s /dev/sda1 -o /dev/sda2 -o /dev/sda3 -C -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode1"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ And Resource "ocfs2-dlm" type "pacemaker:controld" is "Started"
+ And Resource "ocfs2-lvmlockd" type "heartbeat:lvmlockd" is "Started"
+ And Resource "ocfs2-lvmactivate" type "heartbeat:LVM-activate" is "Started"
+ And Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started"
+
+@clean
+Scenario: Add ocfs2 alone on a running cluster
+ Given Has disk "/dev/sda1" on "hanode1"
+ And Has disk "/dev/sda2" on "hanode1"
+ And Has disk "/dev/sda1" on "hanode2"
+ And Has disk "/dev/sda2" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ And Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Online nodes are "hanode1 hanode2"
+ And Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ When Run "crm cluster init ocfs2 -o /dev/sda2 -y" on "hanode1"
+ Then Resource "ocfs2-dlm" type "pacemaker:controld" is "Started"
+ And Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started"
+
+@clean
+Scenario: Add cluster lvm2 + ocfs2 on a running cluster
+ Given Has disk "/dev/sda1" on "hanode1"
+ And Has disk "/dev/sda2" on "hanode1"
+ And Has disk "/dev/sda1" on "hanode2"
+ And Has disk "/dev/sda2" on "hanode2"
+ When Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+ And Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Online nodes are "hanode1 hanode2"
+ And Service "sbd" is "started" on "hanode1"
+ And Service "sbd" is "started" on "hanode2"
+ And Resource "stonith-sbd" type "external/sbd" is "Started"
+ When Run "crm cluster init ocfs2 -o /dev/sda2 -C -y" on "hanode1"
+ Then Resource "ocfs2-dlm" type "pacemaker:controld" is "Started"
+ And Resource "ocfs2-lvmlockd" type "heartbeat:lvmlockd" is "Started"
+ And Resource "ocfs2-lvmactivate" type "heartbeat:LVM-activate" is "Started"
+ And Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started"
diff --git a/test/features/qdevice_options.feature b/test/features/qdevice_options.feature
new file mode 100644
index 0000000..e0277a7
--- /dev/null
+++ b/test/features/qdevice_options.feature
@@ -0,0 +1,50 @@
+@qdevice
+Feature: corosync qdevice/qnetd options
+
+ Test corosync qdevice/qnetd options:
+ "--qdevice-algo": QNetd decision ALGORITHM(ffsplit/lms, default:ffsplit)
+  "--qdevice-tie-breaker": QNetd TIE_BREAKER(lowest/highest/valid_node_id, default:lowest)
+ "--qdevice-tls": Whether using TLS on QDevice/QNetd(on/off/required, default:on)
+ "--qdevice-heuristics": COMMAND to run with absolute path. For multiple commands, use ";" to separate
+  Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2 qnetd-node
+
+ @clean
+ Scenario: Use "--qdevice-algo" to change qnetd decision algorithm to "lms"
+ Given Cluster service is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ When Run "crm cluster init --qnetd-hostname=qnetd-node --qdevice-algo=lms -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Show corosync qdevice configuration
+
+ @clean
+ Scenario: Use "--qdevice-tie-breaker" to change qnetd tie_breaker to "highest"
+ Given Cluster service is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ When Run "crm cluster init --qnetd-hostname=qnetd-node --qdevice-tie-breaker=highest -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Show corosync qdevice configuration
+
+ @clean
+ Scenario: Use "--qdevice-tls" to turn off TLS certification
+ Given Cluster service is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ When Run "crm cluster init --qnetd-hostname=qnetd-node --qdevice-tls=off -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Show corosync qdevice configuration
+
+ @clean
+ Scenario: Use "--qdevice-heuristics" to configure heuristics
+ Given Cluster service is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ When Run "crm cluster init --qnetd-hostname=qnetd-node --qdevice-heuristics='/usr/bin/test -f /tmp/file_exists;/usr/bin/which pacemaker' -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Show corosync qdevice configuration
diff --git a/test/features/qdevice_setup_remove.feature b/test/features/qdevice_setup_remove.feature
new file mode 100644
index 0000000..df7af3d
--- /dev/null
+++ b/test/features/qdevice_setup_remove.feature
@@ -0,0 +1,173 @@
+@qdevice
+Feature: corosync qdevice/qnetd setup/remove process
+
+ Test corosync qdevice/qnetd setup/remove process
+  Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2 hanode3 hanode4 qnetd-node
+
+  Background: Cluster and qdevice services are stopped
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode2"
+
+ @clean
+ Scenario: Setup qdevice/qnetd during init/join process
+ When Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ # for bsc#1181415
+ Then Expected "Restarting cluster service" in stdout
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "corosync-qnetd" is "started" on "qnetd-node"
+ And Show status from qnetd
+ And Show corosync qdevice configuration
+ And Show qdevice status
+
+ @clean
+ Scenario: Setup qdevice/qnetd on running cluster
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Service "corosync-qdevice" is "stopped" on "hanode2"
+ When Write multi lines to file "/etc/corosync/corosync.conf" on "hanode1"
+ """
+ # This is a test for bsc#1166684
+
+ """
+ When Write multi lines to file "/etc/corosync/corosync.conf" on "hanode2"
+ """
+ # This is a test for bsc#1166684
+
+ """
+ When Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode1"
+ # for bsc#1181415
+ Then Expected "Starting corosync-qdevice.service in cluster" in stdout
+ Then Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "corosync-qnetd" is "started" on "qnetd-node"
+ And Show status from qnetd
+ And Show corosync qdevice configuration
+
+ @clean
+ Scenario: Remove qdevice from a two nodes cluster
+ When Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Show corosync qdevice configuration
+ When Run "crm cluster remove --qdevice -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Cluster service is "started" on "hanode2"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode2"
+ And Show corosync qdevice configuration
+
+ @clean
+  Scenario: Setup qdevice on multiple nodes
+ When Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Expected votes will be "3"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ And Service "corosync-qdevice" is "started" on "hanode3"
+ And Expected votes will be "4"
+ When Run "crm cluster join -c hanode1 -y" on "hanode4"
+ Then Cluster service is "started" on "hanode4"
+ And Online nodes are "hanode1 hanode2 hanode3 hanode4"
+ And Service "corosync-qdevice" is "started" on "hanode4"
+ And Expected votes will be "5"
+ And Show corosync qdevice configuration
+ And Show status from qnetd
+
+ @clean
+  Scenario: Setup qdevice on an existing multi-node cluster
+ When Run "crm cluster init -u -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ When Run "crm cluster join -c hanode1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ When Run "crm cluster join -c hanode1 -y" on "hanode4"
+ Then Cluster service is "started" on "hanode4"
+ And Online nodes are "hanode1 hanode2 hanode3 hanode4"
+ And Expected votes will be "4"
+ When Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Show corosync qdevice configuration
+ And Expected votes will be "5"
+ And Service "corosync-qdevice" is "started" on "hanode4"
+ And Service "corosync-qdevice" is "started" on "hanode3"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Show status from qnetd
+
+ @clean
+ Scenario: Setup qdevice using IPv6
+ When Run "crm cluster init -u -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ When Run "crm cluster init qdevice --qnetd-hostname @qnetd-node.ip6.0 -y" on "hanode1"
+ Then Show corosync qdevice configuration
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Show status from qnetd
+
+ @skip_non_root
+ @clean
+ Scenario: Passwordless for root, not for sudoer (bsc#1209193)
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "useradd -m -s /bin/bash xin" on "hanode1"
+ When Run "echo "xin ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin" on "hanode1"
+ When Run "rm -f /root/.config/crm/crm.conf" on "hanode1"
+ When Run "useradd -m -s /bin/bash xin" on "hanode2"
+ When Run "echo "xin ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin" on "hanode2"
+ When Run "rm -f /root/.config/crm/crm.conf" on "hanode2"
+ When Run "su xin -c "sudo crm cluster init qdevice --qnetd-hostname=qnetd-node -y"" on "hanode1"
+ Then Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+
+ @skip_non_root
+ @clean
+ Scenario: Missing crm/crm.conf (bsc#1209193)
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "rm -f /root/.config/crm/crm.conf" on "hanode1"
+ When Run "rm -f /root/.config/crm/crm.conf" on "hanode2"
+ When Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+
+ @clean
+  Scenario: One qnetd for multiple clusters, added in parallel
+ When Run "crm cluster init -n cluster1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm cluster init -n cluster2 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ When Run "crm cluster init qdevice --qnetd-hostname qnetd-node -y" on "hanode2,hanode3"
+ Then Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode3"
+ And Service "corosync-qnetd" is "started" on "qnetd-node"
diff --git a/test/features/qdevice_usercase.feature b/test/features/qdevice_usercase.feature
new file mode 100644
index 0000000..c35d2cb
--- /dev/null
+++ b/test/features/qdevice_usercase.feature
@@ -0,0 +1,89 @@
+@qdevice
+Feature: Verify that the master side survives a split-brain
+
+  Steps to set up a two-node cluster with a heuristics qdevice,
+  start a promotable clone resource, and make sure the master side always keeps quorum:
+  1. Set up a two-node cluster
+  2. Generate a script to check whether this node is master
+  3. Add a promotable clone resource
+  4. Set up qdevice with heuristics
+  5. Use the iptables command to simulate split-brain
+  6. Check that hanode1 has quorum while hanode2 does not
+
+  Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2 qnetd-node
+
+  Background: Cluster and qdevice services are stopped
+ Given Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode2"
+
+ @clean
+ Scenario: Setup qdevice with heuristics
+ When Run "crm cluster init -y --qnetd-hostname=qnetd-node --qdevice-heuristics="/usr/bin/test -f /tmp/heuristics.txt" --qdevice-heuristics-mode="on"" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ And Online nodes are "hanode1 hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "corosync-qnetd" is "started" on "qnetd-node"
+ And Show corosync qdevice configuration
+ When Run "crm corosync status qnetd" on "hanode1"
+ Then Expected regrex "Heuristics:\s+Fail" in stdout
+ When Run "touch /tmp/heuristics.txt" on "hanode1"
+ When Run "sleep 30" on "hanode1"
+ When Run "crm corosync status qnetd" on "hanode1"
+ Then Expected regrex "Heuristics:\s+Pass" in stdout
+
+ @clean
+ Scenario: Master survive when split-brain
+    # Set up a two-node cluster
+ When Run "crm cluster init -y -i eth0" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y -i eth0" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+
+    # Generate a script to check whether this node is master
+ When Write multi lines to file "/etc/corosync/qdevice/check_master.sh" on "hanode1"
+ """
+ #!/usr/bin/sh
+ crm_resource --locate -r promotable-1 2>&1 | grep -E "Master|Promoted" | grep `crm_node -n` >/dev/null 2>&1
+ """
+ And Run "chmod +x /etc/corosync/qdevice/check_master.sh" on "hanode1"
+ When Write multi lines to file "/etc/corosync/qdevice/check_master.sh" on "hanode2"
+ """
+ #!/usr/bin/sh
+ crm_resource --locate -r promotable-1 2>&1 | grep -E "Master|Promoted" | grep `crm_node -n` >/dev/null 2>&1
+ """
+ And Run "chmod +x /etc/corosync/qdevice/check_master.sh" on "hanode2"
+ # Add a promotable clone resource and make sure hanode1 is master
+ And Run "crm configure primitive stateful-1 ocf:pacemaker:Stateful op monitor role=Promoted interval=10s op monitor role=Unpromoted interval=5s" on "hanode1"
+ And Run "crm configure clone promotable-1 stateful-1 meta promotable=true" on "hanode1"
+ And Run "sleep 5" on "hanode1"
+ Then Show cluster status on "hanode1"
+
+    # Set up qdevice with heuristics
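+    # check_master.sh exits 0 only on the node hosting the promoted instance,
+    # so the heuristics pass on the master side and qnetd keeps quorum there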
+ When Run "crm cluster init qdevice --qnetd-hostname=qnetd-node --qdevice-heuristics=/etc/corosync/qdevice/check_master.sh -y" on "hanode1"
+ Then Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ When Run "sleep 5" on "hanode1"
+ Then Show status from qnetd
+ When Run "corosync-quorumtool -s" on "hanode1"
+ Then Expected "Quorate: Yes" in stdout
+    # Use the iptables command to simulate split-brain
+ When Run "iptables -I INPUT -s @hanode2.ip.default -j DROP; sudo iptables -I OUTPUT -d @hanode2.ip.default -j DROP" on "hanode1"
+ And Run "iptables -I INPUT -s @hanode1.ip.default -j DROP; sudo iptables -I OUTPUT -d @hanode1.ip.default -j DROP" on "hanode2"
+    # Check that hanode1 has quorum while hanode2 does not
+ And Run "sleep 20" on "hanode1"
+ When Run "crm corosync status quorum" on "hanode1"
+ Then Expected "Quorate: Yes" in stdout
+ When Run "crm corosync status quorum" on "hanode2"
+ Then Expected "Quorate: No" in stdout
+ And Show cluster status on "hanode1"
+ And Show cluster status on "hanode2"
+ When Try "crm corosync status fs" on "hanode1"
+ Then Expected "Wrong type "fs" to query status" in stderr
diff --git a/test/features/qdevice_validate.feature b/test/features/qdevice_validate.feature
new file mode 100644
index 0000000..5403a52
--- /dev/null
+++ b/test/features/qdevice_validate.feature
@@ -0,0 +1,161 @@
+@qdevice
+Feature: corosync qdevice/qnetd options validate
+
+  Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2 hanode3 qnetd-node node-without-ssh
+
+ @clean
+ Scenario: Option "--qnetd-hostname" use the same node
+ When Try "crm cluster init --qnetd-hostname=hanode1"
+ Then Except "ERROR: cluster.init: host for qnetd must be a remote one"
+
+ @clean
+ Scenario: Option "--qnetd-hostname" use hanode1's IP
+ When Try "crm cluster init --qnetd-hostname=@hanode1.ip.0"
+ Then Except "ERROR: cluster.init: host for qnetd must be a remote one"
+
+ @clean
+ Scenario: Option "--qnetd-hostname" use unknown hostname
+ When Try "crm cluster init --qnetd-hostname=error-node"
+ Then Except "ERROR: cluster.init: host "error-node" is unreachable"
+
+ @clean
+ Scenario: Service ssh on qnetd node not available
+ When Run "systemctl stop sshd.service" on "node-without-ssh"
+ When Try "crm cluster init --qnetd-hostname=node-without-ssh"
+ Then Except "ERROR: cluster.init: ssh service on "node-without-ssh" not available"
+
+ @clean
+ Scenario: Option "--qdevice-port" set wrong port
+ When Try "crm cluster init --qnetd-hostname=qnetd-node --qdevice-port=1"
+ Then Except "ERROR: cluster.init: invalid qdevice port range(1024 - 65535)"
+
+ @clean
+ Scenario: Option "--qdevice-tie-breaker" set wrong value
+ When Try "crm cluster init --qnetd-hostname=qnetd-node --qdevice-tie-breaker=wrongtiebreaker"
+ Then Except "ERROR: cluster.init: invalid qdevice tie_breaker(lowest/highest/valid_node_id)"
+
+ @clean
+ Scenario: Option "--qdevice-heuristics" set wrong value
+ When Try "crm cluster init --qnetd-hostname=qnetd-node --qdevice-heuristics='ls /opt'"
+ Then Except "ERROR: cluster.init: commands for heuristics should be absolute path"
+ When Try "crm cluster init --qnetd-hostname=qnetd-node --qdevice-heuristics='/bin/not_exist_cmd /opt'"
+ Then Except "ERROR: cluster.init: command /bin/not_exist_cmd not exist"
+
+ @clean
+ Scenario: Option "--qnetd-hostname" is required by other qdevice options
+ When Try "crm cluster init --qdevice-port=1234"
+ Then Except multiple lines
+ """
+ usage: init [options] [STAGE]
+ crm: error: Option --qnetd-hostname is required if want to configure qdevice
+ """
+
+ @clean
+  Scenario: Option --qdevice-heuristics is required to configure heuristics mode
+ When Try "crm cluster init --qnetd-hostname=qnetd-node --qdevice-heuristics-mode="on""
+ Then Except multiple lines
+ """
+ usage: init [options] [STAGE]
+ crm: error: Option --qdevice-heuristics is required if want to configure heuristics mode
+ """
+
+ @clean
+  Scenario: Node for qnetd does not have corosync-qnetd installed
+ Given Cluster service is "stopped" on "hanode2"
+ When Try "crm cluster init --qnetd-hostname=hanode2 -y"
+ Then Except multiple lines
+ """
+ ERROR: cluster.init: Package "corosync-qnetd" not installed on hanode2!
+ Cluster service already successfully started on this node except qdevice service.
+ If you still want to use qdevice, install "corosync-qnetd" on hanode2.
+ Then run command "crm cluster init" with "qdevice" stage, like:
+ crm cluster init qdevice qdevice_related_options
+ That command will setup qdevice separately.
+ """
+ And Cluster service is "started" on "hanode1"
+
+ @clean
+ Scenario: Raise error when adding qdevice stage with the same cluster name
+ Given Cluster service is "stopped" on "hanode2"
+ Given Cluster service is "stopped" on "hanode3"
+ When Run "crm cluster init -n cluster1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm cluster init -n cluster1 -y" on "hanode3"
+ Then Cluster service is "started" on "hanode3"
+ When Try "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode2,hanode3"
+ Then Except "ERROR: cluster.init: Duplicated cluster name "cluster1"!"
+ When Run "crm cluster stop" on "hanode2"
+ When Run "crm cluster stop" on "hanode3"
+
+ @clean
+ Scenario: Raise error when the same cluster name already exists on qnetd
+ Given Cluster service is "stopped" on "hanode1"
+ Given Cluster service is "stopped" on "hanode2"
+ When Try "crm cluster init -n cluster1 --qnetd-hostname=qnetd-node -y" on "hanode2"
+ When Try "crm cluster init -n cluster1 --qnetd-hostname=qnetd-node -y"
+ Then Except multiple lines
+ """
+ ERROR: cluster.init: This cluster's name "cluster1" already exists on qnetd server!
+ Cluster service already successfully started on this node except qdevice service.
+ If you still want to use qdevice, consider to use the different cluster-name property.
+ Then run command "crm cluster init" with "qdevice" stage, like:
+ crm cluster init qdevice qdevice_related_options
+ That command will setup qdevice separately.
+ """
+ And Cluster service is "started" on "hanode1"
+ And Cluster service is "started" on "hanode2"
+
+ @clean
+ Scenario: Run qdevice stage on inactive cluster node
+ Given Cluster service is "stopped" on "hanode1"
+ When Try "crm cluster init qdevice --qnetd-hostname=qnetd-node"
+ Then Except "ERROR: cluster.init: Cluster is inactive - can't run qdevice stage"
+
+ @clean
+  Scenario: Run qdevice stage but omit the "--qnetd-hostname" option
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Try "crm cluster init qdevice -y"
+ Then Except multiple lines
+ """
+ usage: init [options] [STAGE]
+ crm: error: Option --qnetd-hostname is required if want to configure qdevice
+ """
+
+ @clean
+  Scenario: Set up qdevice on a single node cluster with RA running (bsc#1181415)
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ When Run "crm configure primitive d Dummy op monitor interval=3s" on "hanode1"
+ When Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Expected "WARNING: To use qdevice service, need to restart cluster service manually on each node" in stderr
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+ When Run "crm cluster restart" on "hanode1"
+ Then Service "corosync-qdevice" is "started" on "hanode1"
+
+ @clean
+  Scenario: Remove qdevice from a single node cluster (bsc#1181415)
+ When Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ When Run "crm cluster remove --qdevice -y" on "hanode1"
+ Then Expected "Restarting cluster service" in stdout
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
+
+ @clean
+  Scenario: Remove qdevice from a single node cluster which has RA running (bsc#1181415)
+ When Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ When Run "crm configure primitive d Dummy op monitor interval=3s" on "hanode1"
+ When Run "crm cluster remove --qdevice -y" on "hanode1"
+ Then Expected "WARNING: To remove qdevice service, need to restart cluster service manually on each node" in stderr
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ When Run "crm cluster restart" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Service "corosync-qdevice" is "stopped" on "hanode1"
diff --git a/test/features/resource_failcount.feature b/test/features/resource_failcount.feature
new file mode 100644
index 0000000..69f402a
--- /dev/null
+++ b/test/features/resource_failcount.feature
@@ -0,0 +1,61 @@
+@resource
+Feature: Use "crm resource failcount" to manage failcounts
+
+  Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1
+
+ Background: Setup one node cluster and configure a Dummy resource
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm configure primitive d Dummy op monitor interval=3s" on "hanode1"
+ Then Resource "d" type "Dummy" is "Started"
+
+ @clean
+  Scenario: Validation of wrong input parameters
+ When Try "crm resource failcount d showss hanode1"
+ Then Except "ERROR: resource.failcount: showss is not valid command(should be one of ['set', 'delete', 'show'])"
+ When Try "crm resource failcount d set hanode11 0"
+ Then Except "ERROR: resource.failcount: Node hanode11 not in this cluster"
+
+ @clean
+ Scenario: Set the failcount to 0
+ When Run "rm -f /run/resource-agents/Dummy-d.state" on "hanode1"
+ And Wait "5" seconds
+ Then Resource "d" failcount on "hanode1" is "1"
+ When Run "crm resource failcount d set hanode1 0" on "hanode1"
+ Then Resource "d" failcount on "hanode1" is "0"
+
+ @clean
+ Scenario: Set multiple failcounts to 0
+ When Run "sed -i -e '/rm \${OCF_RESKEY_state}/a\' -e "else\nreturn \$OCF_ERR_GENERIC" /usr/lib/ocf/resource.d/heartbeat/Dummy" on "hanode1"
+ And Run "rm -f /run/resource-agents/Dummy-d.state" on "hanode1"
+ And Wait "5" seconds
+ Then Resource "d" failcount on "hanode1" is "INFINITY"
+ """
+    now there are two failcount entries: one for monitor, another for stop
+ """
+ When Run "crm resource failcount d set hanode1 0" on "hanode1"
+ """
+ set all failcounts to 0
+ """
+ Then Resource "d" failcount on "hanode1" is "0"
+ When Run "crm resource cleanup" on "hanode1"
+ And Wait "5" seconds
+ And Run "rm -f /run/resource-agents/Dummy-d.state" on "hanode1"
+ And Wait "5" seconds
+ Then Resource "d" failcount on "hanode1" is "INFINITY"
+ """
+    now there are two failcount entries: one for monitor, another for stop
+ """
+ When Run "crm resource failcount d set hanode1 0 stop" on "hanode1"
+ """
+ set stop failcounts to 0
+ """
+ Then Resource "d" failcount on "hanode1" is "1"
+ When Run "crm resource failcount d set hanode1 0 monitor" on "hanode1"
+ """
+ set monitor failcounts to 0
+ """
+ Then Resource "d" failcount on "hanode1" is "0"
+
diff --git a/test/features/resource_set.feature b/test/features/resource_set.feature
new file mode 100644
index 0000000..a6726d7
--- /dev/null
+++ b/test/features/resource_set.feature
@@ -0,0 +1,154 @@
+@resource
+Feature: Use "crm configure set" to update attributes and operations
+
+  Tag @clean means the cluster service needs to be stopped if it is running
+ Need nodes: hanode1 hanode2
+
+ Background: Setup cluster and configure some resources
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "crm cluster init -y" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm cluster join -c hanode1 -y" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm configure primitive d Dummy op monitor interval=3s" on "hanode1"
+ Then Resource "d" type "Dummy" is "Started"
+ When Run "crm configure primitive vip IPaddr2 params ip=@vip.0 op monitor interval=3s" on "hanode1"
+ Then Resource "vip" type "IPaddr2" is "Started"
+ And Cluster virtual IP is "@vip.0"
+ When Run "crm configure primitive s ocf:pacemaker:Stateful op monitor role=Promoted interval=3s op monitor role=Unpromoted interval=5s" on "hanode1"
+ Then Resource "s" type "Stateful" is "Started"
+
+ @clean
+  Scenario: Validation of wrong input parameters
+ When Try "crm configure set path"
+ Then Except "ERROR: configure.set: Expected (path value), takes exactly 2 arguments (1 given)"
+ When Try "crm configure set xxxx value"
+ Then Except "ERROR: configure.set: Invalid path: "xxxx"; Valid path: "id.[op_type.][interval.]name""
+ When Try "crm configure set xxxx.name value"
+ Then Except "ERROR: configure.set: Object xxxx not found"
+ When Try "crm configure set d.name value"
+ Then Except "ERROR: configure.set: Attribute not found: d.name"
+ When Try "crm configure set d.monitor.100.timeout 10"
+ Then Except "ERROR: configure.set: Operation "monitor" interval "100" not found for resource d"
+ When Try "crm configure set s.monitor.interval 20"
+ Then Except "ERROR: configure.set: Should specify interval of monitor"
+
+ @clean
+ Scenario: Using configure.set to update resource parameters and operation values
+ When Run "crm configure set vip.ip @vip.0" on "hanode1"
+ Then Cluster virtual IP is "@vip.0"
+ When Run "crm configure set d.monitor.on-fail ignore" on "hanode1"
+ And Run "crm configure show d" on "hanode1"
+ Then Expected "on-fail=ignore" in stdout
+ When Run "crm configure set s.monitor.5s.interval 20s" on "hanode1"
+ And Run "crm configure show s" on "hanode1"
+ Then Expected "interval=20s" in stdout
+ When Run "crm configure set op-options.timeout 101" on "hanode1"
+ And Run "crm configure show op-options" on "hanode1"
+ Then Expected "timeout=101" in stdout
+
+ @clean
+ Scenario: Parse node and lifetime correctly (bsc#1192618)
+ Given Resource "d" is started on "hanode1"
+ # move <res> <node>
+ When Run "crm resource move d hanode2" on "hanode1"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode2"
+ When Run "crm resource clear d" on "hanode1"
+
+ # move <res> <node> force
+ When Run "crm resource move d hanode1" on "hanode1"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode1"
+ When Run "crm resource clear d" on "hanode1"
+
+ # move <res> force
+ When Run "crm resource move d force" on "hanode1"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode2"
+ When Run "crm resource clear d" on "hanode1"
+
+ # move <res> <lifetime> force
+ When Run "crm resource move d PT5M force" on "hanode1"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode1"
+ When Run "crm resource clear d" on "hanode1"
+
+ # move <res> <node> <lifetime>
+ When Run "crm resource move d hanode2 PT5M" on "hanode1"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode2"
+ When Run "crm resource clear d" on "hanode1"
+
+ # move <res> <node> <lifetime> force
+ When Run "crm resource move d hanode1 PT5M force" on "hanode1"
+ When Run "sleep 2" on "hanode1"
+ Then Resource "d" is started on "hanode1"
+ When Run "crm resource clear d" on "hanode1"
+
+ When Try "crm resource move d hanode2 PT5M force xxx"
+ Then Except "ERROR: resource.move: usage: move <rsc> [<node>] [<lifetime>] [force]"
+ When Try "crm resource move d hanode2 PT5M forcd"
+ Then Except "ERROR: resource.move: usage: move <rsc> [<node>] [<lifetime>] [force]"
+ When Try "crm resource move d xxxx PT5M force"
+ Then Except "ERROR: resource.move: Not our node: xxxx"
+ When Try "crm resource move d"
+ Then Except "ERROR: resource.move: No target node: Move requires either a target node or 'force'"
+
+ @clean
+  Scenario: Promote and demote a promotable clone resource (bsc#1194125)
+ When Run "crm configure primitive s2 ocf:pacemaker:Stateful op monitor role=Promoted interval=3s op monitor role=Unpromoted interval=5s" on "hanode1"
+ And Run "crm configure clone p2 s2 meta promotable=true" on "hanode1"
+ And Run "crm resource demote p2" on "hanode1"
+ Then Run "sleep 2;! crm_resource --locate -r p2|grep -E 'Master|Promoted'" OK
+ When Run "crm resource promote p2" on "hanode2"
+ Then Run "sleep 2;crm_resource --locate -r p2|grep -E 'Master|Promoted'" OK
+
+ @clean
+  Scenario: Operation warnings
+ When Run "crm configure primitive id=d2 Dummy op start interval=5s" on "hanode1"
+ Then Expected "WARNING: d2: Specified interval for start is 5s, it must be 0" in stderr
+ When Run "crm configure primitive id=d3 Dummy op monitor interval=0" on "hanode1"
+ Then Expected "WARNING: d3: interval in monitor should be larger than 0, advised is 10s" in stderr
+ When Run "crm configure primitive s2 ocf:pacemaker:Stateful op monitor role=Promoted interval=3s op monitor role=Unpromoted interval=3s" on "hanode1"
+ Then Expected "WARNING: s2: interval in monitor must be unique, advised is 11s" in stderr
+ When Run "crm configure primitive id=d4 Dummy op start timeout=10s" on "hanode1"
+ Then Expected "WARNING: d4: specified timeout 10s for start is smaller than the advised 20s" in stderr
+
+ @clean
+  Scenario: Trace RA with a specific directory
+ When Run "crm resource trace d monitor" on "hanode1"
+ Then Expected "Trace for d:monitor is written to /var/lib/heartbeat/trace_ra/Dummy" in stdout
+ When Wait "10" seconds
+ Then Run "bash -c 'ls /var/lib/heartbeat/trace_ra/Dummy/d.monitor.*'" OK
+ When Run "crm resource untrace d" on "hanode1"
+ Then Expected "Stop tracing d" in stdout
+ When Run "crm resource trace d monitor /trace_log_d" on "hanode1"
+ Then Expected "Trace for d:monitor is written to /trace_log_d/Dummy" in stdout
+ When Wait "10" seconds
+ Then Run "bash -c 'ls /trace_log_d/Dummy/d.monitor.*'" OK
+ When Run "crm resource untrace d" on "hanode1"
+ Then Expected "Stop tracing d" in stdout
+
+ @clean
+ Scenario: Add promotable=true and interleave=true automatically (bsc#1205522)
+ When Run "crm configure primitive s2 ocf:pacemaker:Stateful" on "hanode1"
+ And Run "crm configure clone p2 s2" on "hanode1"
+ Then Run "sleep 2;crm configure show|grep -A1 'clone p2 s2'|grep 'promotable=true interleave=true'" OK
+ When Run "crm configure primitive s3 ocf:pacemaker:Stateful" on "hanode1"
+ And Run "crm configure clone p3 s3 meta promotable=false" on "hanode1"
+ Then Run "sleep 2;crm configure show|grep -A1 'clone p3 s3'|grep 'promotable=false interleave=true'" OK
+ When Run "crm configure primitive d2 Dummy" on "hanode1"
+ And Run "crm configure clone p4 d2" on "hanode1"
+ Then Run "sleep 2;crm configure show|grep -A1 'clone p4 d2'|grep 'interleave=true'" OK
+
+ @clean
+ Scenario: Run rsctest
+ When Run "crm resource stop d vip" on "hanode1"
+ When Run "crm configure rsctest d vip" on "hanode1"
+ Then Expected multiple lines in output
+ """
+ INFO: Probing resources
+ INFO: Testing on hanode1: d vip
+ INFO: Testing on hanode2: d vip
+ """
diff --git a/test/features/ssh_agent.feature b/test/features/ssh_agent.feature
new file mode 100644
index 0000000..5c632dd
--- /dev/null
+++ b/test/features/ssh_agent.feature
@@ -0,0 +1,86 @@
+# vim: sw=2 sts=2
+Feature: ssh-agent support
+
+ Test ssh-agent support for crmsh
+ Need nodes: hanode1 hanode2 hanode3 qnetd-node
+
+  Scenario: Errors are reported when ssh-agent is not available
+ When Try "crm cluster init --use-ssh-agent -y" on "hanode1"
+ Then Expected "Environment variable SSH_AUTH_SOCK does not exist." in stderr
+ When Try "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init --use-ssh-agent -y" on "hanode1"
+ Then Expected "Environment variable SSH_AUTH_SOCK does not exist." not in stderr
+
+ Scenario: Errors are reported when there are no keys in ssh-agent
+ Given ssh-agent is started at "/tmp/ssh-auth-sock" on nodes ["hanode1", "hanode2", "hanode3"]
+ When Try "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init --use-ssh-agent -y" on "hanode1"
+ Then Expected "ssh-add" in stderr
+
+ Scenario: Skip creating ssh key pairs with --use-ssh-agent
+ Given Run "mkdir ~/ssh_disabled" OK on "hanode1,hanode2,hanode3"
+ And Run "mv ~/.ssh/id_* ~/ssh_disabled" OK on "hanode1,hanode2,hanode3"
+ When Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock ssh-add ~/ssh_disabled/id_rsa" on "hanode1,hanode2,hanode3"
+ And Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init --use-ssh-agent -y" on "hanode1"
+ And Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster join --use-ssh-agent -y -c hanode1" on "hanode2"
+ And Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster join --use-ssh-agent -y -c hanode1" on "hanode3"
+ Then Cluster service is "started" on "hanode1"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ # check the number of keys in authorized_keys
+ And Run "test x1 == x$(awk 'END {print NR}' ~/.ssh/authorized_keys)" OK
+ And Run "test x3 == x$(sudo awk 'END {print NR}' ~hacluster/.ssh/authorized_keys)" OK
+
+ Scenario: Skip creating ssh key pairs with --use-ssh-agent and use -N
+ Given Run "crm cluster stop" OK on "hanode1,hanode2,hanode3"
+ When Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init --use-ssh-agent -y -N hanode2 -N hanode3" on "hanode1"
+ Then Cluster service is "started" on "hanode3"
+ And Online nodes are "hanode1 hanode2 hanode3"
+ And Run "test x1 == x$(awk 'END {print NR}' ~/.ssh/authorized_keys)" OK on "hanode3"
+ And Run "test x3 == x$(sudo awk 'END {print NR}' ~hacluster/.ssh/authorized_keys)" OK on "hanode3"
+
+ Scenario: crm report
+ Then Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm report /tmp/report1" OK on "hanode1"
+ Then Directory "hanode2" in "/tmp/report1.tar.bz2"
+ Then Directory "hanode3" in "/tmp/report1.tar.bz2"
+
+ Scenario: Use qnetd
+ Given Run "crm cluster stop" OK on "hanode1,hanode2,hanode3"
+ When Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init --use-ssh-agent -y" on "hanode1"
+ And Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init qdevice --use-ssh-agent -y --qnetd-hostname qnetd-node" on "hanode1"
+ And Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster join --use-ssh-agent -y -c hanode1" on "hanode2"
+ Then Cluster service is "started" on "hanode1"
+ And Online nodes are "hanode1 hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "corosync-qnetd" is "started" on "qnetd-node"
+
+ Scenario: Use qnetd with -N
+ Given Run "crm cluster stop" OK on "hanode1,hanode2"
+ When Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init --use-ssh-agent -y -N hanode2 --qnetd-hostname qnetd-node" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ And Online nodes are "hanode1 hanode2"
+ And Service "corosync-qdevice" is "started" on "hanode1"
+ And Service "corosync-qdevice" is "started" on "hanode2"
+ And Service "corosync-qnetd" is "started" on "qnetd-node"
+
+ Scenario: GEO cluster setup with ssh-agent
+ Given Run "crm cluster stop" OK on "hanode1,hanode2"
+ And Run "systemctl disable --now booth@booth" OK on "hanode1,hanode2,hanode3"
+ And Cluster service is "stopped" on "hanode1"
+ And Cluster service is "stopped" on "hanode2"
+ When Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init -y -n cluster1 --use-ssh-agent" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+ When Run "crm configure primitive vip IPaddr2 params ip=@vip.0" on "hanode1"
+
+ When Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster init -y -n cluster2 --use-ssh-agent" on "hanode2"
+ Then Cluster service is "started" on "hanode2"
+ When Run "crm configure primitive vip IPaddr2 params ip=@vip.1" on "hanode2"
+
+ When Run "crm cluster geo_init -y --clusters "cluster1=@vip.0 cluster2=@vip.1" --tickets tickets-geo --arbitrator hanode3" on "hanode1"
+ When Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster geo_join -y --use-ssh-agent --cluster-node hanode1 --clusters "cluster1=@vip.0 cluster2=@vip.1"" on "hanode2"
+
+ Given Service "booth@booth" is "stopped" on "hanode3"
+ When Run "SSH_AUTH_SOCK=/tmp/ssh-auth-sock crm cluster geo_init_arbitrator -y --use-ssh-agent --cluster-node hanode1" on "hanode3"
+ Then Service "booth@booth" is "started" on "hanode3"
+ When Run "crm resource start g-booth" on "hanode1"
+ Then Show cluster status on "hanode1"
+ When Run "crm resource start g-booth" on "hanode2"
+ Then Show cluster status on "hanode2"
diff --git a/test/features/steps/__init__.py b/test/features/steps/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/features/steps/__init__.py
diff --git a/test/features/steps/behave_agent.py b/test/features/steps/behave_agent.py
new file mode 100755
index 0000000..eafeedd
--- /dev/null
+++ b/test/features/steps/behave_agent.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python3
+# behave_agent.py - a simple agent to execute commands
+# NO AUTHENTICATION. It should only be used in behave tests.
+import io
+import os
+import pwd
+import socket
+import struct
+import subprocess
+import typing
+
+
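+# The agent speaks a tiny framed protocol: every message is an 8-byte '!ii'
+# header (big-endian type and payload length) followed by the raw payload.
+# A request is MSG_USER, MSG_CMD, MSG_EOF; a reply is MSG_RC, MSG_OUT,
+# MSG_ERR, MSG_EOF.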
+MSG_EOF = 0
+MSG_USER = 1
+MSG_CMD = 2
+MSG_OUT = 4
+MSG_ERR = 5
+MSG_RC = 6
+
+
+class Message:
+ @staticmethod
+ def write(output, type: int, data: bytes):
+ output.write(struct.pack('!ii', type, len(data)))
+ output.write(data)
+
+ @staticmethod
+ def read(input):
+ buf = input.read(8)
+ type, length = struct.unpack('!ii', buf)
+ if length > 0:
+ buf = input.read(length)
+ else:
+ buf = b''
+ return type, buf
+
+
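+# Minimal RawIOBase adapter so a plain socket can be wrapped with
+# io.BufferedReader/io.BufferedWriter for framed, buffered I/O.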
+class SocketIO(io.RawIOBase):
+ def __init__(self, s: socket.socket):
+ self._socket = s
+
+ def readable(self) -> bool:
+ return True
+
+ def writable(self) -> bool:
+ return True
+
+ def read(self, __size: int = -1) -> bytes:
+ return self._socket.recv(__size)
+
+ def readinto(self, __buffer) -> int:
+ return self._socket.recv_into(__buffer)
+
+ def readall(self) -> bytes:
+ raise NotImplementedError
+
+ def write(self, __b) -> int:
+ return self._socket.send(__b)
+
+
+def call(host: str, port: int, cmdline: str, user: typing.Optional[str] = None):
+ family, type, proto, _, sockaddr = socket.getaddrinfo(host, port, type=socket.SOCK_STREAM)[0]
+ with socket.socket(family, type, proto) as s:
+ s.connect(sockaddr)
+ sout = io.BufferedWriter(SocketIO(s), 4096)
+ Message.write(sout, MSG_USER, user.encode('utf-8') if user else _getuser().encode('utf-8'))
+ Message.write(sout, MSG_CMD, cmdline.encode('utf-8'))
+ Message.write(sout, MSG_EOF, b'')
+ sout.flush()
+ s.shutdown(socket.SHUT_WR)
+ rc = None
+ stdout = []
+ stderr = []
+ sin = io.BufferedReader(SocketIO(s), 4096)
+ while True:
+ type, buf = Message.read(sin)
+ if type == MSG_OUT:
+ stdout.append(buf)
+ elif type == MSG_ERR:
+ stderr.append(buf)
+ elif type == MSG_RC:
+ rc, = struct.unpack('!i', buf)
+ elif type == MSG_EOF:
+ assert rc is not None
+ return rc, b''.join(stdout), b''.join(stderr)
+ else:
+ raise ValueError(f"Unknown message type: {type}")
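+
+
+# Example (a sketch; assumes an agent is listening on port 1122, as the
+# steps in this suite do):
+#   rc, stdout, stderr = call('hanode1', 1122, 'crm_mon -1', user='root')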
+
+
+def serve(stdin, stdout, stderr):
+ # This is an xinetd-style service.
+ assert os.geteuid() == 0
+ user = None
+ cmd = None
+ sin = io.BufferedReader(stdin)
+ while True:
+ type, buf = Message.read(sin)
+ if type == MSG_USER:
+ user = buf.decode('utf-8')
+ elif type == MSG_CMD:
+ cmd = buf.decode('utf-8')
+ elif type == MSG_EOF:
+ assert user is not None
+ assert cmd is not None
+ break
+ else:
+ raise ValueError(f"Unknown message type: {type}")
+ if user == 'root':
+ args = ['/bin/sh']
+ else:
+ args = ['/bin/su', '-', user, '-c', '/bin/sh']
+ result = subprocess.run(
+ args,
+ input=cmd.encode('utf-8'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ sout = io.BufferedWriter(stdout)
+ Message.write(sout, MSG_RC, struct.pack('!i', result.returncode))
+ Message.write(sout, MSG_OUT, result.stdout)
+ Message.write(sout, MSG_ERR, result.stderr)
+ Message.write(sout, MSG_EOF, b'')
+ stdout.flush()
+
+
+def _getuser():
+ return pwd.getpwuid(os.geteuid()).pw_name
+
+
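+# How the agent might be wired up as an xinetd-style service (a sketch, not
+# part of this patch; the port must match the callers above, and the script
+# path is hypothetical):
+#
+#   service behave-agent
+#   {
+#       type        = UNLISTED
+#       port        = 1122
+#       socket_type = stream
+#       protocol    = tcp
+#       wait        = no
+#       user        = root
+#       server      = /usr/bin/python3
+#       server_args = /opt/behave_agent.py
+#   }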
+if __name__ == '__main__':
+ with open(0, 'rb') as stdin, \
+ open(1, 'wb') as stdout, \
+ open(2, 'wb') as stderr:
+ serve(stdin, stdout, stderr)
diff --git a/test/features/steps/const.py b/test/features/steps/const.py
new file mode 100644
index 0000000..3ec8845
--- /dev/null
+++ b/test/features/steps/const.py
@@ -0,0 +1,353 @@
+CRM_H_OUTPUT = '''usage: crm [-h|--help] [OPTIONS] [SUBCOMMAND ARGS...]
+or crm help SUBCOMMAND
+
+For a list of available subcommands, use crm help.
+
+Use crm without arguments for an interactive session.
+Call a subcommand directly for a "single-shot" use.
+Call crm with a level name as argument to start an interactive
+session from that level.
+
+See the crm(8) man page or call crm help for more details.
+
+positional arguments:
+ SUBCOMMAND
+
+optional arguments:
+ -h, --help show this help message and exit
+ --version show program's version number and exit
+ -f FILE, --file FILE Load commands from the given file. If a dash (-) is
+ used in place of a file name, crm will read commands
+ from the shell standard input (stdin).
+ -c CIB, --cib CIB Start the session using the given shadow CIB file.
+ Equivalent to `cib use <CIB>`.
+ -D OUTPUT_TYPE, --display OUTPUT_TYPE
+ Choose one of the output options: plain, color-always,
+ color, or uppercase. The default is color if the
+ terminal emulation supports colors, else plain.
+ -F, --force Make crm proceed with applying changes where it would
+ normally ask the user to confirm before proceeding.
+ This option is mainly useful in scripts, and should be
+ used with care.
+ -n, --no Automatically answer no when prompted
+ -w, --wait Make crm wait for the cluster transition to finish
+ (for the changes to take effect) after each processed
+ line.
+ -H DIR|FILE|SESSION, --history DIR|FILE|SESSION
+ A directory or file containing a cluster report to
+ load into history, or the name of a previously saved
+ history session.
+ -d, --debug Print verbose debugging information.
+ -R, --regression-tests
+ Enables extra verbose trace logging used by the
+ regression tests. Logs all external calls made by
+ crmsh.
+ --scriptdir DIR Extra directory where crm looks for cluster scripts,
+ or a list of directories separated by semi-colons
+ (e.g. /dir1;/dir2;etc.).
+ -X PROFILE Collect profiling data and save in PROFILE.
+ -o OPTION=VALUE, --opt OPTION=VALUE
+ Set crmsh option temporarily. If the options are saved
+ using+options save+ then the value passed here will
+ also be saved.Multiple options can be set by using
+ +-o+ multiple times.'''
+
+
+CRM_CLUSTER_INIT_H_OUTPUT = '''Initializes a new HA cluster
+
+usage: init [options] [STAGE]
+
+Initialize a cluster from scratch. This command configures
+a complete cluster, and can also add additional cluster
+nodes to the initial one-node cluster using the --nodes
+option.
+
+optional arguments:
+ -h, --help Show this help message
+ -q, --quiet Be quiet (don't describe what's happening, just do it)
+ -y, --yes Answer "yes" to all prompts (use with caution, this is
+ destructive, especially those storage related
+ configurations and stages.)
+ -n NAME, --name NAME Set the name of the configured cluster.
+ -N [USER@]HOST, --node [USER@]HOST
+ The member node of the cluster. Note: the current node
+ is always get initialized during bootstrap in the
+ beginning.
+ -S, --enable-sbd Enable SBD even if no SBD device is configured
+ (diskless mode)
+ -w WATCHDOG, --watchdog WATCHDOG
+ Use the given watchdog device or driver name
+ -x, --skip-csync2-sync
+ Skip csync2 initialization (an experimental option)
+ --no-overwrite-sshkey
+ Avoid "/root/.ssh/id_rsa" overwrite if "-y" option is
+ used (False by default; Deprecated)
+ --use-ssh-agent Use an existing key from ssh-agent instead of creating
+ new key pairs
+
+Network configuration:
+ Options for configuring the network and messaging layer.
+
+ -i IF, --interface IF
+ Bind to IP address on interface IF. Use -i second time
+ for second interface
+ -u, --unicast Configure corosync to communicate over unicast(udpu).
+ This is the default transport type
+ -U, --multicast Configure corosync to communicate over multicast.
+ Default is unicast
+ -A IP, --admin-ip IP Configure IP address as an administration virtual IP
+ -M, --multi-heartbeats
+ Configure corosync with second heartbeat line
+ -I, --ipv6 Configure corosync use IPv6
+
+QDevice configuration:
+ QDevice participates in quorum decisions. With the assistance of
+ a third-party arbitrator Qnetd, it provides votes so that a cluster
+ is able to sustain more node failures than standard quorum rules
+ allow. It is recommended for clusters with an even number of nodes
+ and highly recommended for 2 node clusters.
+
+ Options for configuring QDevice and QNetd.
+
+ --qnetd-hostname [USER@]HOST
+ User and host of the QNetd server. The host can be
+ specified in either hostname or IP address.
+ --qdevice-port PORT TCP PORT of QNetd server (default:5403)
+ --qdevice-algo ALGORITHM
+ QNetd decision ALGORITHM (ffsplit/lms,
+ default:ffsplit)
+ --qdevice-tie-breaker TIE_BREAKER
+ QNetd TIE_BREAKER (lowest/highest/valid_node_id,
+ default:lowest)
+ --qdevice-tls TLS Whether using TLS on QDevice/QNetd (on/off/required,
+ default:on)
+ --qdevice-heuristics COMMAND
+ COMMAND to run with absolute path. For multiple
+ commands, use ";" to separate (details about
+ heuristics can see man 8 corosync-qdevice)
+ --qdevice-heuristics-mode MODE
+ MODE of operation of heuristics (on/sync/off,
+ default:sync)
+
+Storage configuration:
+ Options for configuring shared storage.
+
+ -s DEVICE, --sbd-device DEVICE
+ Block device to use for SBD fencing, use ";" as
+ separator or -s multiple times for multi path (up to 3
+ devices)
+ -o DEVICE, --ocfs2-device DEVICE
+ Block device to use for OCFS2; When using Cluster LVM2
+ to manage the shared storage, user can specify one or
+ multiple raw disks, use ";" as separator or -o
+ multiple times for multi path (must specify -C option)
+ NOTE: this is a Technical Preview
+ -C, --cluster-lvm2 Use Cluster LVM2 (only valid together with -o option)
+ NOTE: this is a Technical Preview
+ -m MOUNT, --mount-point MOUNT
+ Mount point for OCFS2 device (default is
+ /srv/clusterfs, only valid together with -o option)
+ NOTE: this is a Technical Preview
+
+Stage can be one of:
+ ssh Create SSH keys for passwordless SSH between cluster nodes
+ csync2 Configure csync2
+ corosync Configure corosync
+ sbd Configure SBD (requires -s <dev>)
+ cluster Bring the cluster online
+ ocfs2 Configure OCFS2 (requires -o <dev>) NOTE: this is a Technical Preview
+ vgfs Create volume group and filesystem (ocfs2 template only,
+ requires -o <dev>) NOTE: this stage is an alias of ocfs2 stage
+ admin Create administration virtual IP (optional)
+ qdevice Configure qdevice and qnetd
+
+Note:
+ - If stage is not specified, the script will run through each stage
+ in sequence, with prompts for required information.
+
+Examples:
+ # Setup the cluster on the current node
+ crm cluster init -y
+
+ # Setup the cluster with multiple nodes
+ (NOTE: the current node will be part of the cluster even not listed in the -N option as below)
+ crm cluster init -N node1 -N node2 -N node3 -y
+
+ # Setup the cluster on the current node, with two network interfaces
+ crm cluster init -i eth1 -i eth2 -y
+
+ # Setup the cluster on the current node, with disk-based SBD
+ crm cluster init -s <share disk> -y
+
+ # Setup the cluster on the current node, with diskless SBD
+ crm cluster init -S -y
+
+ # Setup the cluster on the current node, with QDevice
+ crm cluster init --qnetd-hostname <qnetd addr> -y
+
+ # Setup the cluster on the current node, with SBD+OCFS2
+ crm cluster init -s <share disk1> -o <share disk2> -y
+
+ # Setup the cluster on the current node, with SBD+OCFS2+Cluster LVM
+ crm cluster init -s <share disk1> -o <share disk2> -o <share disk3> -C -y
+
+ # Add SBD on a running cluster
+ crm cluster init sbd -s <share disk> -y
+
+ # Replace SBD device on a running cluster which already configured SBD
+ crm -F cluster init sbd -s <share disk> -y
+
+ # Add diskless SBD on a running cluster
+ crm cluster init sbd -S -y
+
+ # Add QDevice on a running cluster
+ crm cluster init qdevice --qnetd-hostname <qnetd addr> -y
+
+ # Add OCFS2+Cluster LVM on a running cluster
+ crm cluster init ocfs2 -o <share disk1> -o <share disk2> -C -y'''
+
+
+CRM_CLUSTER_JOIN_H_OUTPUT = '''Join existing cluster
+
+usage: join [options] [STAGE]
+
+Join the current node to an existing cluster. The
+current node cannot be a member of a cluster already.
+Pass any node in the existing cluster as the argument
+to the -c option.
+
+optional arguments:
+ -h, --help Show this help message
+ -q, --quiet Be quiet (don't describe what's happening, just do it)
+ -y, --yes Answer "yes" to all prompts (use with caution)
+ -w WATCHDOG, --watchdog WATCHDOG
+ Use the given watchdog device
+ --use-ssh-agent Use an existing key from ssh-agent instead of creating
+ new key pairs
+
+Network configuration:
+ Options for configuring the network and messaging layer.
+
+ -c [USER@]HOST, --cluster-node [USER@]HOST
+ User and host to login to an existing cluster node.
+ The host can be specified with either a hostname or an
+ IP.
+ -i IF, --interface IF
+ Bind to IP address on interface IF. Use -i second time
+ for second interface
+
+Stage can be one of:
+ ssh Obtain SSH keys from existing cluster node (requires -c <host>)
+ csync2 Configure csync2 (requires -c <host>)
+ ssh_merge Merge root's SSH known_hosts across all nodes (csync2 must
+ already be configured).
+ cluster Start the cluster on this node
+
+If stage is not specified, each stage will be invoked in sequence.
+
+Examples:
+ # Join with a cluster node
+ crm cluster join -c <node> -y
+
+ # Join with a cluster node, with the same network interface used by that node
+ crm cluster join -c <node> -i eth1 -i eth2 -y'''
+
+
+CRM_CLUSTER_REMOVE_H_OUTPUT = '''Remove node(s) from the cluster
+
+usage: remove [options] [<node> ...]
+
+Remove one or more nodes from the cluster.
+
+This command can remove the last node in the cluster,
+thus effectively removing the whole cluster. To remove
+the last node, pass --force argument to crm or set
+the config.core.force option.
+
+optional arguments:
+ -h, --help Show this help message
+ -q, --quiet Be quiet (don't describe what's happening, just do it)
+ -y, --yes Answer "yes" to all prompts (use with caution)
+ -c HOST, --cluster-node HOST
+ IP address or hostname of cluster node which will be
+ deleted
+ -F, --force Remove current node
+ --qdevice Remove QDevice configuration and service from cluster'''
+
+
+CRM_CLUSTER_GEO_INIT_H_OUTPUT = '''Configure cluster as geo cluster
+
+usage: geo-init [options]
+
+Create a new geo cluster with the current cluster as the
+first member. Pass the complete geo cluster topology as
+arguments to this command, and then use geo-join and
+geo-init-arbitrator to add the remaining members to
+the geo cluster.
+
+optional arguments:
+ -h, --help Show this help message
+ -q, --quiet Be quiet (don't describe what's happening, just do it)
+ -y, --yes Answer "yes" to all prompts (use with caution)
+ -a [USER@]HOST, --arbitrator [USER@]HOST
+ Geo cluster arbitrator
+ -s DESC, --clusters DESC
+ Geo cluster description (see details below)
+ -t LIST, --tickets LIST
+ Tickets to create (space-separated)
+
+Cluster Description
+
+ This is a map of cluster names to IP addresses.
+ Each IP address will be configured as a virtual IP
+ representing that cluster in the geo cluster
+ configuration.
+
+ Example with two clusters named paris and amsterdam:
+
+ --clusters "paris=192.168.10.10 amsterdam=192.168.10.11"
+
+ Name clusters using the --name parameter to
+ crm bootstrap init.'''
+
+
+CRM_CLUSTER_GEO_JOIN_H_OUTPUT = '''Join cluster to existing geo cluster
+
+usage: geo-join [options]
+
+This command should be run from one of the nodes in a cluster
+which is currently not a member of a geo cluster. The geo
+cluster configuration will be fetched from the provided node,
+and the cluster will be added to the geo cluster.
+
+Note that each cluster in a geo cluster needs to have a unique
+name set. The cluster name can be set using the --name argument
+to init, or by configuring corosync with the cluster name in
+an existing cluster.
+
+optional arguments:
+ -h, --help Show this help message
+ -q, --quiet Be quiet (don't describe what's happening, just do it)
+ -y, --yes Answer "yes" to all prompts (use with caution)
+ -c [USER@]HOST, --cluster-node [USER@]HOST
+ An already-configured geo cluster or arbitrator
+ -s DESC, --clusters DESC
+ Geo cluster description (see geo-init for details)'''
+
+
+CRM_CLUSTER_GEO_INIT_ARBIT_H_OUTPUT = '''Initialize node as geo cluster arbitrator
+
+usage: geo-init-arbitrator [options]
+
+Configure the current node as a geo arbitrator. The command
+requires an existing geo cluster or geo arbitrator from which
+to get the geo cluster configuration.
+
+optional arguments:
+ -h, --help Show this help message
+ -q, --quiet Be quiet (don't describe what's happening, just do it)
+ -y, --yes Answer "yes" to all prompts (use with caution)
+ -c [USER@]HOST, --cluster-node [USER@]HOST
+ An already-configured geo cluster
+ --use-ssh-agent Use an existing key from ssh-agent instead of creating
+ new key pairs'''
diff --git a/test/features/steps/step_implementation.py b/test/features/steps/step_implementation.py
new file mode 100644
index 0000000..74f0cc8
--- /dev/null
+++ b/test/features/steps/step_implementation.py
@@ -0,0 +1,575 @@
+import re
+import time
+import os
+import datetime
+import yaml
+
+import behave
+from behave import given, when, then
+import behave_agent
+from crmsh import corosync, sbd, userdir, bootstrap
+from crmsh import utils as crmutils
+from crmsh.sh import ShellUtils
+from utils import check_cluster_state, check_service_state, online, run_command, me, \
+ run_command_local_or_remote, file_in_archive, \
+ assert_eq, is_unclean, assert_in
+import const
+
+
+def _parse_str(text):
+ return text[1:-1].encode('utf-8').decode('unicode_escape')
+_parse_str.pattern='".*"'
+
+
+behave.use_step_matcher("cfparse")
+behave.register_type(str=_parse_str)
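+# With the cfparse matcher, step text may use typed fields with cardinality,
+# e.g. [{nodes:str+}] captures a comma-separated list of quoted strings and
+# hands the step a Python list of decoded values.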
+
+
+@when('Write multi lines to file "{f}" on "{addr}"')
+def step_impl(context, f, addr):
+ data_list = context.text.split('\n')
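+    # Append each line via "tee -a"; the last line uses "echo -n" so the
+    # file does not gain a trailing newline.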
+ for line in data_list:
+ echo_option = " -n" if line == data_list[-1] else ""
+ cmd = "echo{} \"{}\"|sudo tee -a {}".format(echo_option, line, f)
+ if addr != me():
+ sudoer = userdir.get_sudoer()
+ user = f"{sudoer}@" if sudoer else ""
+ cmd = f"ssh {user}{addr} '{cmd}'"
+ run_command(context, cmd)
+
+
+@given('Cluster service is "{state}" on "{addr}"')
+def step_impl(context, state, addr):
+ assert check_cluster_state(context, state, addr) is True
+
+
+@given('Nodes [{nodes:str+}] are cleaned up')
+def step_impl(context, nodes):
+ run_command(context, 'crm resource cleanup || true')
+ for node in nodes:
+ # wait for ssh service
+ for _ in range(10):
+ rc, _, _ = ShellUtils().get_stdout_stderr('ssh {} true'.format(node))
+ if rc == 0:
+ break
+ time.sleep(1)
+ run_command_local_or_remote(context, "crm cluster stop {} || true".format(node), node)
+ assert check_cluster_state(context, 'stopped', node) is True
+
+
+@given('Service "{name}" is "{state}" on "{addr}"')
+def step_impl(context, name, state, addr):
+ assert check_service_state(context, name, state, addr) is True
+
+
+@given('Has disk "{disk}" on "{addr}"')
+def step_impl(context, disk, addr):
+ _, out, _ = run_command_local_or_remote(context, "fdisk -l", addr)
+ assert re.search(r'{} '.format(disk), out) is not None
+
+
+@given('Online nodes are "{nodelist}"')
+def step_impl(context, nodelist):
+ assert online(context, nodelist) is True
+
+
+@given('Run "{cmd}" OK')
+def step_impl(context, cmd):
+ rc, _, _ = run_command(context, cmd)
+ assert rc == 0
+
+
+@then('Run "{cmd}" OK')
+def step_impl(context, cmd):
+ rc, _, _ = run_command(context, cmd)
+ assert rc == 0
+
+
+@when('Run "{cmd}" OK')
+def step_impl(context, cmd):
+ rc, _, _ = run_command(context, cmd)
+ assert rc == 0
+
+
+@given('IP "{addr}" is belong to "{iface}"')
+def step_impl(context, addr, iface):
+ cmd = 'ip address show dev {}'.format(iface)
+ res = re.search(r' {}/'.format(addr), run_command(context, cmd)[1])
+ assert bool(res) is True
+
+
+@given('Run "{cmd}" OK on "{addr}"')
+def step_impl(context, cmd, addr):
+ _, out, _ = run_command_local_or_remote(context, cmd, addr, True)
+
+@when('Run "{cmd}" on "{addr}"')
+def step_impl(context, cmd, addr):
+ _, out, _ = run_command_local_or_remote(context, cmd, addr)
+
+
+@then('Run "{cmd}" OK on "{addr}"')
+def step_impl(context, cmd, addr):
+ _, out, _ = run_command_local_or_remote(context, cmd, addr)
+
+
+@then('Print stdout')
+def step_impl(context):
+ context.logger.info("\n{}".format(context.stdout))
+
+
+@then('Print stderr')
+def step_impl(context):
+ context.logger.info("\n{}".format(context.stderr))
+
+
+@then('No crmsh tracebacks')
+def step_impl(context):
+ if "Traceback (most recent call last):" in context.stderr and \
+ re.search('File "/usr/lib/python.*/crmsh/', context.stderr):
+ context.logger.info("\n{}".format(context.stderr))
+ context.failed = True
+
+
+@when('Try "{cmd}" on "{addr}"')
+def step_impl(context, cmd, addr):
+ run_command_local_or_remote(context, cmd, addr, exit_on_fail=False)
+
+
+@when('Try "{cmd}"')
+def step_impl(context, cmd):
+ _, out, _ = run_command(context, cmd, exit_on_fail=False)
+
+
+@when('Wait "{second}" seconds')
+def step_impl(context, second):
+ time.sleep(int(second))
+
+
+@then('Got output "{msg}"')
+def step_impl(context, msg):
+ assert context.stdout == msg
+ context.stdout = None
+
+
+@then('Expected multiple lines')
+def step_impl(context):
+ assert context.stdout == context.text
+ context.stdout = None
+
+
+@then('Expected "{msg}" in stdout')
+def step_impl(context, msg):
+ assert_in(msg, context.stdout)
+ context.stdout = None
+
+
+@then('Expected "{msg}" in stderr')
+def step_impl(context, msg):
+ assert_in(msg, context.stderr)
+ context.stderr = None
+
+
+@then('Expected regrex "{reg_str}" in stdout')
+def step_impl(context, reg_str):
+ res = re.search(reg_str, context.stdout)
+ assert res is not None
+ context.stdout = None
+
+
+@then('Expected return code is "{num}"')
+def step_impl(context, num):
+ assert context.return_code == int(num)
+
+
+@then('Expected "{msg}" not in stdout')
+def step_impl(context, msg):
+ assert msg not in context.stdout
+ context.stdout = None
+
+
+@then('Expected "{msg}" not in stderr')
+def step_impl(context, msg):
+ assert context.stderr is None or msg not in context.stderr
+ context.stderr = None
+
+
+@then('Except "{msg}"')
+def step_impl(context, msg):
+ assert_in(msg, context.stderr)
+ context.stderr = None
+
+
+@then('Except multiple lines')
+def step_impl(context):
+ assert_in(context.text, context.stderr)
+ context.stderr = None
+
+
+@then('Expected multiple lines in output')
+def step_impl(context):
+ assert_in(context.text, context.stdout)
+ context.stdout = None
+
+
+@then('Except "{msg}" in stderr')
+def step_impl(context, msg):
+ assert_in(msg, context.stderr)
+ context.stderr = None
+
+
+@then('Cluster service is "{state}" on "{addr}"')
+def step_impl(context, state, addr):
+ assert check_cluster_state(context, state, addr) is True
+
+
+@then('Service "{name}" is "{state}" on "{addr}"')
+def step_impl(context, name, state, addr):
+ assert check_service_state(context, name, state, addr) is True
+
+
+@then('Online nodes are "{nodelist}"')
+def step_impl(context, nodelist):
+ assert online(context, nodelist) is True
+
+
+@then('Node "{node}" is standby')
+def step_impl(context, node):
+ assert crmutils.is_standby(node) is True
+
+
+@then('Node "{node}" is online')
+def step_impl(context, node):
+ assert crmutils.is_standby(node) is False
+
+
+@then('IP "{addr}" is used by corosync on "{node}"')
+def step_impl(context, addr, node):
+ _, out, _ = run_command_local_or_remote(context, 'corosync-cfgtool -s', node)
+ res = re.search(r' {}\n'.format(addr), out)
+ assert bool(res) is True
+
+
+@then('Cluster name is "{name}"')
+def step_impl(context, name):
+ _, out, _ = run_command(context, 'corosync-cmapctl -b totem.cluster_name')
+ assert out.split()[-1] == name
+
+
+@then('Cluster virtual IP is "{addr}"')
+def step_impl(context, addr):
+ _, out, _ = run_command(context, 'crm configure show|grep -A1 IPaddr2')
+ res = re.search(r' ip={}'.format(addr), out)
+ assert bool(res) is True
+
+
+@then('Cluster is using udpu transport mode')
+def step_impl(context):
+ assert corosync.get_value('totem.transport') == 'udpu'
+
+
+@then('Show cluster status on "{addr}"')
+def step_impl(context, addr):
+ _, out, _ = run_command_local_or_remote(context, 'crm_mon -1', addr)
+ if out:
+ context.logger.info("\n{}".format(out))
+
+
+@then('Show corosync ring status')
+def step_impl(context):
+ _, out, _ = run_command(context, 'crm corosync status ring')
+ if out:
+ context.logger.info("\n{}".format(out))
+
+
+@then('Show crm configure')
+def step_impl(context):
+ _, out, _ = run_command(context, 'crm configure show')
+ if out:
+ context.logger.info("\n{}".format(out))
+
+
+@then('Show status from qnetd')
+def step_impl(context):
+ _, out, _ = run_command(context, 'crm corosync status qnetd')
+ if out:
+ context.logger.info("\n{}".format(out))
+
+
+@then('Show qdevice status')
+def step_impl(context):
+ _, out, _ = run_command(context, 'crm corosync status qdevice')
+ if out:
+ context.logger.info("\n{}".format(out))
+
+
+@then('Show corosync qdevice configuration')
+def step_impl(context):
+ _, out, _ = run_command(context, "sed -n -e '/quorum/,/^}/ p' /etc/corosync/corosync.conf")
+ if out:
+ context.logger.info("\n{}".format(out))
+
+
+@then('Resource "{res}" type "{res_type}" is "{state}"')
+def step_impl(context, res, res_type, state):
+ try_count = 0
+ result = None
+ while try_count < 20:
+ time.sleep(1)
+ _, out, _ = run_command(context, "crm_mon -1rR")
+ if out:
+ result = re.search(r'\s{}\s+.*:+{}\):\s+{} '.format(res, res_type, state), out)
+ if not result:
+ try_count += 1
+ else:
+ break
+ assert result is not None
+
+
+@then('Resource "{res}" failcount on "{node}" is "{number}"')
+def step_impl(context, res, node, number):
+ cmd = "crm resource failcount {} show {}".format(res, node)
+ _, out, _ = run_command(context, cmd)
+ if out:
+ result = re.search(r'name=fail-count-{} value={}'.format(res, number), out)
+ assert result is not None
+
+
+@then('Resource "{res_type}" not configured')
+def step_impl(context, res_type):
+ _, out, _ = run_command(context, "crm configure show")
+ result = re.search(r' {} '.format(res_type), out)
+ assert result is None
+
+
+@then('Output is the same with expected "{cmd}" help output')
+def step_impl(context, cmd):
+ cmd_help = {}
+ cmd_help["crm"] = const.CRM_H_OUTPUT
+ cmd_help["crm_cluster_init"] = const.CRM_CLUSTER_INIT_H_OUTPUT
+ cmd_help["crm_cluster_join"] = const.CRM_CLUSTER_JOIN_H_OUTPUT
+ cmd_help["crm_cluster_remove"] = const.CRM_CLUSTER_REMOVE_H_OUTPUT
+ cmd_help["crm_cluster_geo-init"] = const.CRM_CLUSTER_GEO_INIT_H_OUTPUT
+ cmd_help["crm_cluster_geo-join"] = const.CRM_CLUSTER_GEO_JOIN_H_OUTPUT
+ cmd_help["crm_cluster_geo-init-arbitrator"] = const.CRM_CLUSTER_GEO_INIT_ARBIT_H_OUTPUT
+ key = '_'.join(cmd.split())
+ assert_eq(cmd_help[key], context.stdout)
+
+
+@then('Corosync working on "{transport_type}" mode')
+def step_impl(context, transport_type):
+ if transport_type == "multicast":
+ assert corosync.get_value("totem.transport") is None
+ if transport_type == "unicast":
+ assert_eq("udpu", corosync.get_value("totem.transport"))
+
+
+@then('Expected votes will be "{votes}"')
+def step_impl(context, votes):
+ assert_eq(int(votes), int(corosync.get_value("quorum.expected_votes")))
+
+
+@then('Directory "{directory}" created')
+def step_impl(context, directory):
+ assert os.path.isdir(directory) is True
+
+
+@then('Directory "{directory}" not created')
+def step_impl(context, directory):
+ assert os.path.isdir(directory) is False
+
+
+@then('Default crm_report tar file created')
+def step_impl(context):
+ default_file_name = 'crm_report-{}.tar.bz2'.format(datetime.datetime.now().strftime("%a-%d-%b-%Y"))
+ assert os.path.exists(default_file_name) is True
+
+
+@when('Remove default crm_report tar file')
+def step_impl(context):
+ default_file_name = 'crm_report-{}.tar.bz2'.format(datetime.datetime.now().strftime("%a-%d-%b-%Y"))
+ os.remove(default_file_name)
+
+
+@then('File "{f}" in "{archive}"')
+def step_impl(context, f, archive):
+ assert file_in_archive(f, archive) is True
+
+
+@then('Directory "{f}" in "{archive}"')
+def step_impl(context, f, archive):
+ assert file_in_archive(f, archive) is True
+
+
+@then('File "{f}" not in "{archive}"')
+def step_impl(context, f, archive):
+ assert file_in_archive(f, archive) is False
+
+
+@then('File "{f}" was synced in cluster')
+def step_impl(context, f):
+ cmd = "crm cluster diff {}".format(f)
+ rc, out, _ = run_command(context, cmd)
+ assert_eq("", out)
+
+
+@given('Resource "{res_id}" is started on "{node}"')
+def step_impl(context, res_id, node):
+ rc, out, err = ShellUtils().get_stdout_stderr("crm_mon -1")
+ assert re.search(r'\*\s+{}\s+.*Started\s+{}'.format(res_id, node), out) is not None
+
+
+@then('Resource "{res_id}" is started on "{node}"')
+def step_impl(context, res_id, node):
+ rc, out, err = ShellUtils().get_stdout_stderr("crm_mon -1")
+ assert re.search(r'\*\s+{}\s+.*Started\s+{}'.format(res_id, node), out) is not None
+
+
+@then('SBD option "{key}" value is "{value}"')
+def step_impl(context, key, value):
+ res = sbd.SBDManager.get_sbd_value_from_config(key)
+ assert_eq(value, res)
+
+
+@then('SBD option "{key}" value for "{dev}" is "{value}"')
+def step_impl(context, key, dev, value):
+ res = sbd.SBDTimeout.get_sbd_msgwait(dev)
+ assert_eq(int(value), res)
+
+
+@then('Cluster property "{key}" is "{value}"')
+def step_impl(context, key, value):
+ res = crmutils.get_property(key)
+ assert res is not None
+ assert_eq(value, str(res))
+
+
+@then('Property "{key}" in "{type}" is "{value}"')
+def step_impl(context, key, type, value):
+ res = crmutils.get_property(key, type)
+ assert res is not None
+ assert_eq(value, str(res))
+
+
+@then('Parameter "{param_name}" not configured in "{res_id}"')
+def step_impl(context, param_name, res_id):
+ _, out, _ = run_command(context, "crm configure show {}".format(res_id))
+ result = re.search("params {}=".format(param_name), out)
+ assert result is None
+
+
+@then('Parameter "{param_name}" configured in "{res_id}"')
+def step_impl(context, param_name, res_id):
+ _, out, _ = run_command(context, "crm configure show {}".format(res_id))
+ result = re.search("params {}=".format(param_name), out)
+ assert result is not None
+
+
+@given('Yaml "{path}" value is "{value}"')
+def step_impl(context, path, value):
+ yaml_file = "/etc/crm/profiles.yml"
+ with open(yaml_file) as f:
+ data = yaml.load(f, Loader=yaml.SafeLoader)
+ sec_name, key = path.split(':')
+ assert_eq(str(value), str(data[sec_name][key]))
+
+
+@when('Wait for DC')
+def step_impl(context):
+ while True:
+ time.sleep(1)
+ if crmutils.get_dc():
+ break
+
+
+@then('File "{path}" exists on "{node}"')
+def step_impl(context, path, node):
+ rc, _, stderr = behave_agent.call(node, 1122, 'test -f {}'.format(path), user='root')
+ assert rc == 0
+
+
+@then('File "{path}" not exist on "{node}"')
+def step_impl(context, path, node):
+ cmd = '[ ! -f {} ]'.format(path)
+ rc, _, stderr = behave_agent.call(node, 1122, cmd, user='root')
+ assert rc == 0
+
+
+@then('Directory "{path}" is empty on "{node}"')
+def step_impl(context, path, node):
+ cmd = '[ ! "$(ls -A {})" ]'.format(path)
+ rc, _, stderr = behave_agent.call(node, 1122, cmd, user='root')
+ assert rc == 0
+
+
+@then('Directory "{path}" not empty on "{node}"')
+def step_impl(context, path, node):
+ cmd = '[ "$(ls -A {})" ]'.format(path)
+ rc, _, stderr = behave_agent.call(node, 1122, cmd, user='root')
+ assert rc == 0
+
+
+@then('Node "{node}" is UNCLEAN')
+def step_impl(context, node):
+ assert is_unclean(node) is True
+
+
+@then('Wait "{count}" seconds for "{node}" successfully fenced')
+def step_impl(context, count, node):
+    # behave ignores a step's return value, so fail via assert on timeout
+    index = 0
+    while index <= int(count):
+        rc, out, _ = ShellUtils().get_stdout_stderr("stonith_admin -h {}".format(node))
+        if "Node {} last fenced at:".format(node) in out:
+            return
+        time.sleep(1)
+        index += 1
+    assert False, "Node {} was not fenced within {} seconds".format(node, count)
+
+@then('Check passwordless for hacluster between "{nodelist}"')
+def step_impl(context, nodelist):
+ if userdir.getuser() != 'root' or userdir.get_sudoer():
+ return True
+ failed = False
+ nodes = nodelist.split()
+ for i in range(0, len(nodes)):
+ for j in range(i + 1, len(nodes)):
+ rc, _, _ = behave_agent.call(
+ nodes[i], 1122,
+ f'ssh -o StrictHostKeyChecking=no -o ConnectTimeout=10 {nodes[j]} true',
+ user='hacluster',
+ )
+ if rc != 0:
+ failed = True
+ context.logger.error(f"There is no passwordless configured from {nodes[i]} to {nodes[j]} under 'hacluster'")
+ assert not failed
+
+
+@then('Check user shell for hacluster between "{nodelist}"')
+def step_impl(context, nodelist):
+ if userdir.getuser() != 'root' or userdir.get_sudoer():
+ return True
+ for node in nodelist.split():
+ if node == me():
+ assert bootstrap.is_nologin('hacluster') is False
+ else:
+ assert bootstrap.is_nologin('hacluster', node) is False
+
+
+@given('ssh-agent is started at "{path}" on nodes [{nodes:str+}]')
+def step_impl(context, path, nodes):
+ user = userdir.get_sudoer()
+ if not user:
+ user = userdir.getuser()
+ for node in nodes:
+ rc, _, _ = behave_agent.call(node, 1122, f"systemd-run --uid '{user}' -u ssh-agent /usr/bin/ssh-agent -D -a '{path}'", user='root')
+ assert 0 == rc
+
+
+@then('This file "{target_file}" will trigger UnicodeDecodeError exception')
+def step_impl(context, target_file):
+    # behave ignores a step's return value, so assert that the error occurs
+    try:
+        with open(target_file, "r", encoding="utf-8") as file:
+            file.read()
+    except UnicodeDecodeError:
+        pass
+    else:
+        assert False, "UnicodeDecodeError was not raised"
diff --git a/test/features/steps/utils.py b/test/features/steps/utils.py
new file mode 100644
index 0000000..675c2c4
--- /dev/null
+++ b/test/features/steps/utils.py
@@ -0,0 +1,177 @@
+import concurrent.futures
+import difflib
+import tarfile
+import glob
+import os
+import re
+import socket
+from crmsh import utils, userdir
+from crmsh.sh import ShellUtils
+import behave_agent
+
+
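+# Matches ANSI SGR (color) escape sequences so they can be stripped from output.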
+COLOR_MODE = r'\x1b\[[0-9;]*m'
+
+
+def get_file_type(file_path):
+ rc, out, _ = ShellUtils().get_stdout_stderr("file {}".format(file_path))
+ if re.search(r'{}: bzip2'.format(re.escape(file_path)), out):
+ return "bzip2"
+ if re.search(r'{}: directory'.format(re.escape(file_path)), out):
+ return "directory"
+
+
+def get_all_files(archive_path):
+ archive_type = get_file_type(archive_path)
+ if archive_type == "bzip2":
+ with tarfile.open(archive_path) as tar:
+ return tar.getnames()
+ if archive_type == "directory":
+ all_files = glob.glob("{}/*".format(archive_path)) + glob.glob("{}/*/*".format(archive_path))
+ return all_files
+ return []
+
+
+def file_in_archive(f, archive_path):
+ for item in get_all_files(archive_path):
+ if re.search(r'/{}$'.format(re.escape(f)), item):
+ return True
+ return False
+
+
+def me():
+ return socket.gethostname()
+
+
+def _wrap_cmd_non_root(cmd):
+ """
+ When running under a sudoer or as a non-root user, prefix the node
+ names in 'crm cluster join' (and related) commands with '<user>@';
+ the same applies to options such as -N and --qnetd-hostname.
+ """
+ sudoer = userdir.get_sudoer()
+ current_user = userdir.getuser()
+ if sudoer:
+ user = sudoer
+ elif current_user != 'root':
+ user = current_user
+ else:
+ return cmd
+ if re.search('cluster (?:join|geo_join|geo_init_arbitrator)', cmd) and "@" not in cmd:
+ cmd = re.sub(r'''((?:-c|-N|--qnetd-hostname|--cluster-node|--arbitrator)(?:\s+|=)['"]?)(\S{2,}['"]?)''', f'\\1{user}@\\2', cmd)
+ elif "cluster init" in cmd and ("-N" in cmd or "--qnetd-hostname" in cmd) and "@" not in cmd:
+ cmd = re.sub(r'''((?:-c|-N|--qnetd-hostname|--cluster-node)(?:\s+|=)['"]?)(\S{2,}['"]?)''', f'\\1{user}@\\2', cmd)
+ elif "cluster init" in cmd and "--node" in cmd and "@" not in cmd:
+ search_patt = r"--node [\'\"](.*)[\'\"]"
+ res = re.search(search_patt, cmd)
+ if res:
+ node_str = ' '.join([f"{user}@{n}" for n in res.group(1).split()])
+ cmd = re.sub(search_patt, f"--node '{node_str}'", cmd)
+ return cmd
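+# Illustrative example (hypothetical sudoer "alice"):
+#   _wrap_cmd_non_root("crm cluster join -c node1")
+#   would yield "crm cluster join -c alice@node1"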
+
+
+def run_command(context, cmd, exit_on_fail=True):
+ cmd = _wrap_cmd_non_root(cmd)
+ rc, out, err = ShellUtils().get_stdout_stderr(cmd)
+ context.return_code = rc
+ if out:
+ out = re.sub(COLOR_MODE, '', out)
+ context.stdout = out
+ if err:
+ err = re.sub(COLOR_MODE, '', err)
+ context.stderr = err
+ if rc != 0 and exit_on_fail:
+ if out:
+ context.logger.info("\n{}\n".format(out))
+ context.logger.error("\n{}\n".format(err))
+ context.failed = True
+ return rc, out, err
+
+
+def run_command_local_or_remote(context, cmd, addr, exit_on_fail=True):
+ if addr == me():
+ return run_command(context, cmd, exit_on_fail)
+ cmd = _wrap_cmd_non_root(cmd)
+ sudoer = userdir.get_sudoer()
+ if sudoer is None:
+ user = None
+ else:
+ user = sudoer
+ cmd = f'sudo {cmd}'
+ hosts = addr.split(',')
+ with concurrent.futures.ThreadPoolExecutor(max_workers=len(hosts)) as executor:
+ results = list(executor.map(lambda x: (x, behave_agent.call(x, 1122, cmd, user=user)), hosts))
+ out = utils.to_ascii(results[0][1][1])
+ err = utils.to_ascii(results[0][1][2])
+ context.stdout = out
+ context.stderr = err
+ context.return_code = 0
+ for host, (rc, stdout, stderr) in results:
+ if rc != 0:
+ err = re.sub(COLOR_MODE, '', utils.to_ascii(stderr))
+ context.stderr = err
+ if exit_on_fail:
+ context.logger.error("Failed to run %s on %s@%s: %s", cmd, os.geteuid(), host, err)
+ raise ValueError("{}".format(err))
+ else:
+ return rc, out, err
+ return 0, out, err
+
+
+def check_service_state(context, service_name, state, addr):
+ if state in {'enabled', 'disabled'}:
+ rc, _, _ = behave_agent.call(addr, 1122, f'systemctl is-enabled {service_name}', 'root')
+ return (state == 'enabled') == (rc == 0)
+ elif state in {'started', 'stopped'}:
+ rc, _, _ = behave_agent.call(addr, 1122, f'systemctl is-active {service_name}', 'root')
+ return (state == 'started') == (rc == 0)
+ else:
+ context.logger.error("\nService state should be \"started/stopped/enabled/disabled\"\n")
+ context.failed = True
+ raise ValueError("Service state should be \"started/stopped/enabled/disabled\"")
+
+
+def check_cluster_state(context, state, addr):
+ return check_service_state(context, 'pacemaker.service', state, addr)
+
+
+def is_unclean(node):
+ _, out, _ = ShellUtils().get_stdout_stderr("crm_mon -1")
+ return "{}: UNCLEAN".format(node) in out
+
+
+def online(context, nodelist):
+ rc = True
+ _, out = ShellUtils().get_stdout("sudo crm_node -l")
+ for node in nodelist.split():
+ node_info = "{} member".format(node)
+ if node_info not in out:
+ rc = False
+ context.logger.error("\nNode \"{}\" not online\n".format(node))
+ return rc
+
+def assert_eq(expected, actual):
+ if expected != actual:
+ msg = "\033[32m" "Expected" "\033[31m" " != Actual" "\033[0m" "\n" \
+ "\033[32m" "Expected:" "\033[0m" " {}\n" \
+ "\033[31m" "Actual:" "\033[0m" " {}".format(expected, actual)
+ if isinstance(expected, str) and '\n' in expected:
+ try:
+ diff = '\n'.join(difflib.unified_diff(
+ expected.splitlines(),
+ actual.splitlines(),
+ fromfile="expected",
+ tofile="actual",
+ lineterm="",
+ ))
+ msg = "{}\n" "\033[31m" "Diff:" "\033[0m" "\n{}".format(msg, diff)
+ except Exception:
+ pass
+ raise AssertionError(msg)
+
+def assert_in(expected, actual):
+ if expected not in actual:
+ msg = "\033[32m" "Expected" "\033[31m" " not in Actual" "\033[0m" "\n" \
+ "\033[32m" "Expected:" "\033[0m" " {}\n" \
+ "\033[31m" "Actual:" "\033[0m" " {}".format(expected, actual)
+ raise AssertionError(msg)
diff --git a/test/features/user_access.feature b/test/features/user_access.feature
new file mode 100644
index 0000000..180dd3f
--- /dev/null
+++ b/test/features/user_access.feature
@@ -0,0 +1,114 @@
+@user
+Feature: Functional test for user access
+
+ Need nodes: hanode1
+
+ Scenario: User in haclient group
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "useradd -m -s /bin/bash -N -g 90 xin1" on "hanode1"
+ When Run "echo 'export PATH=$PATH:/usr/sbin/' >> ~xin1/.bashrc" on "hanode1"
+ When Try "su - xin1 -c 'crm cluster init -y'"
+ Then Except multiple lines
+ """
+ ERROR: Please run this command starting with "sudo".
+ Currently, this command needs to use sudo to escalate itself as root.
+ Please consider to add "xin1" as sudoer. For example:
+ sudo bash -c 'echo "xin1 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin1'
+ """
+ When Run "echo "xin1 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin1" on "hanode1"
+ When Try "su - xin1 -c 'crm cluster init -y'"
+ Then Except multiple lines
+ """
+ ERROR: Please run this command starting with "sudo"
+ """
+ When Run "su - xin1 -c 'sudo crm cluster init -y'" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+
+ When Run "su - xin1 -c 'crm node standby hanode1'" on "hanode1"
+ Then Node "hanode1" is standby
+
+ @clean
+ Scenario: User in sudoer
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "useradd -m -s /bin/bash xin3" on "hanode1"
+ When Run "echo 'export PATH=$PATH:/usr/sbin/' >> ~xin3/.bashrc" on "hanode1"
+ And Run "echo "xin3 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin3" on "hanode1"
+ When Try "su - xin3 -c 'crm cluster init -y'"
+ Then Except multiple lines
+ """
+ WARNING: Failed to open log file: [Errno 13] Permission denied: '/var/log/crmsh/crmsh.log'
+ ERROR: Please run this command starting with "sudo"
+ """
+ When Run "su - xin3 -c 'sudo crm cluster init -y'" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+
+ When Try "su - xin3 -c 'crm node standby hanode1'"
+ Then Except multiple lines
+ """
+ WARNING: Failed to open log file: [Errno 13] Permission denied: '/var/log/crmsh/crmsh.log'
+ ERROR: Please run this command starting with "sudo"
+ """
+ When Run "su - xin3 -c 'sudo crm node standby hanode1'" on "hanode1"
+ Then Node "hanode1" is standby
+
+ @clean
+ Scenario: Normal user access
+ Given Cluster service is "stopped" on "hanode1"
+ When Run "useradd -m -s /bin/bash user1" on "hanode1"
+ When Run "echo 'export PATH=$PATH:/usr/sbin/' >> ~user1/.bashrc" on "hanode1"
+ When Try "su - user1 -c 'crm cluster init -y'"
+ Then Except multiple lines
+ """
+ WARNING: Failed to open log file: [Errno 13] Permission denied: '/var/log/crmsh/crmsh.log'
+ ERROR: Please run this command starting with "sudo".
+ Currently, this command needs to use sudo to escalate itself as root.
+ Please consider to add "user1" as sudoer. For example:
+ sudo bash -c 'echo "user1 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user1'
+ """
+ When Run "echo "user1 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user1" on "hanode1"
+ When Try "su - user1 -c 'crm cluster init -y'"
+ Then Except multiple lines
+ """
+ WARNING: Failed to open log file: [Errno 13] Permission denied: '/var/log/crmsh/crmsh.log'
+ ERROR: Please run this command starting with "sudo"
+ """
+ When Run "su - user1 -c 'sudo crm cluster init -y'" on "hanode1"
+ Then Cluster service is "started" on "hanode1"
+
+ When Run "useradd -m -s /bin/bash user2" on "hanode1"
+ When Run "echo 'export PATH=$PATH:/usr/sbin/' >> ~user2/.bashrc" on "hanode1"
+ When Try "su - user2 -c 'crm node standby hanode1'"
+ Then Except multiple lines
+ """
+ WARNING: Failed to open log file: [Errno 13] Permission denied: '/var/log/crmsh/crmsh.log'
+ ERROR: This command needs higher privilege.
+ Option 1) Please consider to add "user2" as sudoer. For example:
+ sudo bash -c 'echo "user2 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user2'
+ Option 2) Add "user2" to the haclient group. For example:
+ sudo usermod -g haclient user2
+ """
+ When Run "usermod -g haclient user2" on "hanode1"
+ When Run "su - user2 -c 'crm node standby hanode1'" on "hanode1"
+ Then Node "hanode1" is standby
+
+ When Run "useradd -m -s /bin/bash user3" on "hanode1"
+ When Run "echo 'export PATH=$PATH:/usr/sbin/' >> ~user3/.bashrc" on "hanode1"
+ When Try "su - user3 -c 'crm node online hanode1'"
+ Then Except multiple lines
+ """
+ WARNING: Failed to open log file: [Errno 13] Permission denied: '/var/log/crmsh/crmsh.log'
+ ERROR: This command needs higher privilege.
+ Option 1) Please consider to add "user3" as sudoer. For example:
+ sudo bash -c 'echo "user3 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user3'
+ Option 2) Add "user3" to the haclient group. For example:
+ sudo usermod -g haclient user3
+ """
+ When Run "echo "user3 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user3" on "hanode1"
+ When Try "su - user3 -c 'crm node online hanode1'"
+ Then Except multiple lines
+ """
+ WARNING: Failed to open log file: [Errno 13] Permission denied: '/var/log/crmsh/crmsh.log'
+ ERROR: Please run this command starting with "sudo"
+ """
+ When Run "su - user3 -c 'sudo crm node online hanode1'" on "hanode1"
+ Then Node "hanode1" is online
diff --git a/test/history-test.tar.bz2 b/test/history-test.tar.bz2
new file mode 100644
index 0000000..38b37d0
--- /dev/null
+++ b/test/history-test.tar.bz2
Binary files differ
diff --git a/test/list-undocumented-commands.py b/test/list-undocumented-commands.py
new file mode 100755
index 0000000..4729b48
--- /dev/null
+++ b/test/list-undocumented-commands.py
@@ -0,0 +1,29 @@
+#!/usr/bin/python3
+#
+# Script to discover and report undocumented commands.
+
+from crmsh.ui_root import Root
+from crmsh import help
+
+help.HELP_FILE = "doc/crm.8.adoc"
+help._load_help()
+
+_IGNORED_COMMANDS = ('help', 'quit', 'cd', 'up', 'ls')
+
+
+def check_help(ui):
+ for name, child in ui.children().items():
+ if child.type == 'command':
+ try:
+ h = help.help_command(ui.name, name)
+ if h.generated and name not in _IGNORED_COMMANDS:
+ print("Undocumented: %s %s" % (ui.name, name))
+ except:
+ print("Undocumented: %s %s" % (ui.name, name))
+ elif child.type == 'level':
+ h = help.help_level(name)
+ if h.generated:
+ print("Undocumented: %s %s" % (ui.name, name))
+ check_help(child.level)
+
+check_help(Root())
diff --git a/test/profile-history.sh b/test/profile-history.sh
new file mode 100755
index 0000000..02831f8
--- /dev/null
+++ b/test/profile-history.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+case $1 in
+ cumulative)
+ python -c "import pstats; s = pstats.Stats(\"$2\"); s.sort_stats(\"cumulative\").print_stats()" | less
+ ;;
+ time)
+ python -c "import pstats; s = pstats.Stats(\"$2\"); s.sort_stats(\"time\").print_stats()" | less
+ ;;
+ timecum)
+ python -c "import pstats; s = pstats.Stats(\"$2\"); s.sort_stats(\"time\", \"cum\").print_stats()" | less
+ ;;
+ callers)
+ python -c "import pstats; s = pstats.Stats(\"$2\"); s.print_callers(.5, \"$3\")" | less
+ ;;
+ verbose)
+ PYTHONPATH=. ./crm -X "$2" -H "$3" history log
+ ;;
+ *)
+ PYTHONPATH=. ./crm -X "$1" -H "$2" history log >/dev/null
+ ;;
+esac
diff --git a/test/regression.sh b/test/regression.sh
new file mode 100755
index 0000000..ec1a416
--- /dev/null
+++ b/test/regression.sh
@@ -0,0 +1,199 @@
+#!/bin/sh
+# Copyright (C) 2007 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# See COPYING for license information.
+
+rootdir="$(dirname "$0")"
+TESTDIR=${TESTDIR:-$rootdir/testcases}
+DFLT_TESTSET=basicset
+OUTDIR=${OUTDIR:-crmtestout}
+CRM_OUTF="$OUTDIR/crm.out"
+CRM_LOGF="$OUTDIR/crm.log"
+CRM_DEBUGF="$OUTDIR/crm.debug"
+OUTF="$OUTDIR/regression.out"
+DIFF_OPTS="--ignore-all-space -U 1"
+common_filter=$TESTDIR/common.filter
+common_exclf=$TESTDIR/common.excl
+export OUTDIR
+
+logmsg() {
+ echo "$(date): $*" | tee -a "$CRM_DEBUGF" | tee -a "$CRM_LOGF"
+}
+abspath() {
+ echo "$1" | grep -qs "^/" && echo "$1" || echo "$(pwd)/$1"
+}
+
+usage() {
+ cat<<EOF
+
+usage: $0 [-q] [-P] [testcase...|set:testset]
+
+Test crm shell using supplied testcases. If none are given,
+set:basicset is used. All testcases and sets are in testcases/.
+See also README.regression for description.
+
+-q: quiet operation (no progress shown)
+-P: profile test
+
+EOF
+exit 2
+}
+
+if [ ! -d "$TESTDIR" ]; then
+ echo "$0: $TESTDIR does not exit"
+ usage
+fi
+
+rm -f "$CRM_LOGF" "$CRM_DEBUGF"
+
+# make tools/lrmd/stonithd log to our files only
+HA_logfile="$(abspath "$CRM_LOGF")"
+HA_debugfile="$(abspath "$CRM_DEBUGF")"
+HA_use_logd=no
+HA_logfacility=""
+export HA_logfile HA_debugfile HA_use_logd HA_logfacility
+
+mkdir -p "$OUTDIR"
+. /etc/ha.d/shellfuncs
+
+args="$(getopt hqPc:p:m: "$*")"
+[ $? -ne 0 ] && usage
+eval set -- "$args"
+
+output_mode="normal"
+while [ x"$1" != x ]; do
+ case "$1" in
+ -h) usage;;
+ -m) output_mode=$2; shift 1;;
+ -q) output_mode="silent";;
+ -P) do_profile=1;;
+ -c) CRM=$2; export CRM; shift 1;;
+ -p) PATH="$2:$PATH"; export PATH; shift 1;;
+ --) shift 1; break;;
+ *) usage;;
+ esac
+ shift 1
+done
+
+exec >"$OUTF" 2>&1
+
+# Where to send user output
+# evaltest.sh also uses >&3 for printing progress dots
+case $output_mode in
+ silent) exec 3>/dev/null;;
+ buildbot) exec 3>"$CRM_OUTF";;
+ *) exec 3>/dev/tty;;
+esac
+
+setenvironment() {
+ filterf=$TESTDIR/$testcase.filter
+ pref=$TESTDIR/$testcase.pre
+ postf=$TESTDIR/$testcase.post
+ exclf=$TESTDIR/$testcase.excl
+ log_filter=$TESTDIR/$testcase.log_filter
+ expf=$TESTDIR/$testcase.exp
+ outf=$OUTDIR/$testcase.out
+ difff=$OUTDIR/$testcase.diff
+}
+
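+# Pipe the test output through the optional common and per-test filter
+# scripts, then drop any lines matched by the exclude pattern files.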
+filter_output() {
+ { [ -x $common_filter ] && $common_filter || cat;} |
+ { [ -f $common_exclf ] && egrep -vf $common_exclf || cat;} |
+ { [ -x $filterf ] && $filterf || cat;} |
+ { [ -f $exclf ] && egrep -vf $exclf || cat;}
+}
+
+dumpcase() {
+ cat<<EOF
+----------
+testcase $testcase failed
+output is in $outf
+diff (from $difff):
+`cat $difff`
+----------
+EOF
+}
+
+runtestcase() {
+ setenvironment
+ (
+ cd $rootdir
+ [ -x "$pref" ] && $pref >/dev/null 2>&1
+ )
+ echo -n "$testcase" >&3
+ logmsg "BEGIN testcase $testcase"
+ (
+ cd $rootdir
+ ./evaltest.sh $testargs
+ ) < $TESTDIR/$testcase > $outf 2>&1
+
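+ # Normalize the <cib> element by stripping all its attributes, so the
+ # comparison ignores volatile values such as epoch and timestamps.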
+ perl -pi -e 's/\<cib[^>]*\>/\<cib\>/g' $outf
+
+ filter_output < $outf |
+ if [ "$prepare" ]; then
+ echo " saving to expect file" >&3
+ cat > $expf
+ else
+ (
+ cd $rootdir
+ [ -x "$postf" ] && $postf >/dev/null 2>&1
+ )
+ echo -n " checking..." >&3
+ if head -2 $expf | grep -qs '^<cib'; then
+ crm_diff -o $expf -n -
+ else
+ diff $DIFF_OPTS $expf -
+ fi > $difff
+ if [ $? -ne 0 ]; then
+ echo " FAIL" >&3
+ cat $difff >&3
+ dumpcase
+ return 1
+ else
+ echo " PASS" >&3
+ rm -f $outf $difff
+ fi
+ fi
+ sed -n "/BEGIN testcase $testcase/,\$p" $CRM_LOGF |
+ { [ -x $log_filter ] && $log_filter || cat;} |
+ egrep '(CRIT|ERROR):'
+ logmsg "END testcase $testcase"
+}
+
+[ "$1" = prepare ] && { prepare=1; shift 1;}
+[ $# -eq 0 ] && set "set:$DFLT_TESTSET"
+testargs=""
+if [ -n "$do_profile" ]; then
+ if echo $1 | grep -qs '^set:'; then
+ echo you can profile just one test
+ echo 'really!'
+ exit 1
+ fi
+ testargs="prof"
+fi
+
+for a; do
+ if [ "$a" ] && [ -f "$TESTDIR/$a" ]; then
+ testcase=$a
+ runtestcase
+ elif echo "$a" | grep -q "^set:"; then
+ TESTSET="$TESTDIR/$(echo $a | sed 's/set://')"
+ if [ -f "$TESTSET" ]; then
+ while read -r testcase; do
+ runtestcase
+ done < "$TESTSET"
+ else
+ echo "testset $TESTSET does not exist" >&3
+ fi
+ else
+ echo "test $TESTDIR/$a does not exist" >&3
+ fi
+done
+
+res=`grep -E -wv '(BEGIN|END) testcase|warning: stray .* before' "$OUTF"`
+if [ -n "$res" ];then
+ echo "The failed messages: $res"
+ echo "seems like some tests failed or else something not expected"
+ echo "check $OUTF and diff files in $OUTDIR"
+ echo "in case you wonder what lrmd was doing, read $(abspath "$CRM_LOGF") and $(abspath "$CRM_DEBUGF")"
+ exit 1
+fi >&3
diff --git a/test/run-functional-tests b/test/run-functional-tests
new file mode 100755
index 0000000..5c3bca7
--- /dev/null
+++ b/test/run-functional-tests
@@ -0,0 +1,551 @@
+#!/bin/bash
+DOCKER_IMAGE=${DOCKER_IMAGE:-"nyang23/haleap:15.5"}
+PROJECT_PATH=$(dirname $(dirname `realpath $0`))
+PROJECT_INSIDE="/opt/crmsh"
+DOCKER_SERVICE="docker.service"
+COROSYNC_CONF="/etc/corosync/corosync.conf"
+COROSYNC_AUTH="/etc/corosync/authkey"
+HA_NETWORK_FIRST="ha_network_first"
+HA_NETWORK_SECOND="ha_network_second"
+declare -a HA_NETWORK_ARRAY
+declare -a HA_NETWORK_V6_ARRAY
+HA_NETWORK_ARRAY[0]=$HA_NETWORK_FIRST
+HA_NETWORK_ARRAY[1]=$HA_NETWORK_SECOND
+HA_NETWORK_V6_ARRAY[0]="2001:db8:10::/64"
+HA_NETWORK_V6_ARRAY[1]="2001:db8:20::/64"
+BEHAVE_CASE_DIR="$(dirname $0)/features/"
+BEHAVE_CASE_EXCLUDE="sbd|ocfs2"
+
+read -r -d '' SSHD_CONFIG_AZURE << EOM
+PermitRootLogin no
+AuthorizedKeysFile .ssh/authorized_keys
+ChallengeResponseAuthentication no
+UsePAM yes
+X11Forwarding yes
+ClientAliveInterval 180
+Subsystem sftp /usr/lib/ssh/sftp-server
+AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
+AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
+AcceptEnv LC_IDENTIFICATION LC_ALL
+PasswordAuthentication no
+EOM
+
+read -r -d '' COROSYNC_CONF_TEMPLATE << EOM
+totem {
+ version: 2
+ cluster_name: hacluster
+ clear_node_high_bit: yes
+ interface {
+ ringnumber: 0
+ mcastport: 5405
+ ttl: 1
+ }
+
+ transport: udpu
+ crypto_hash: sha1
+ crypto_cipher: aes256
+ token: 5000
+ join: 60
+ max_messages: 20
+ token_retransmits_before_loss_const: 10
+}
+
+logging {
+ fileline: off
+ to_stderr: no
+ to_logfile: no
+ logfile: /var/log/cluster/corosync.log
+ to_syslog: yes
+ debug: off
+ timestamp: on
+ logger_subsys {
+ subsys: QUORUM
+ debug: off
+ }
+
+}
+
+nodelist {
+}
+
+quorum {
+
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ provider: corosync_votequorum
+}
+EOM
+
+
+fatal() {
+ error $*
+ exit 1
+}
+
+
+error() {
+ echo "ERROR: $*"
+}
+
+
+warning() {
+ echo "WARNING: $*"
+}
+
+
+info() {
+ echo "INFO: $*"
+}
+
+
+is_number() {
+ num=$1
+ test ! -z "$num" && test "$num" -eq "$num" 2> /dev/null && test "$num" -gt 0 2> /dev/null
+}
+
+
+check_docker_env() {
+ # check if docker available
+ systemctl list-unit-files $DOCKER_SERVICE &> /dev/null
+ if [ "$?" -ne 0 ];then
+ fatal "$DOCKER_SERVICE is not available"
+ fi
+ # check if docker.service started
+ systemctl is-active $DOCKER_SERVICE &> /dev/null
+ if [ "$?" -ne 0 ];then
+ fatal "$DOCKER_SERVICE is not active"
+ fi
+ # check if docker cgroup driver is systemd
+ docker info 2> /dev/null|grep -q "Cgroup Driver: systemd"
+ if [ "$?" -ne 0 ];then
+ warning "docker cgroup driver suggest to be \"systemd\""
+ fi
+
+ [ "$1" == "cleanup" ] && return
+ # check if ha network already exists
+ for network in ${HA_NETWORK_ARRAY[@]};do
+ docker network ls|grep -q "$network"
+ if [ "$?" -eq 0 ];then
+ fatal "HA specific network \"$network\" already exists"
+ fi
+ done
+}
+
+
+get_test_case_array() {
+ test -d $BEHAVE_CASE_DIR || fatal "Cannot find '$BEHAVE_CASE_DIR'"
+ ls $BEHAVE_CASE_DIR|grep "\.feature"|grep -Ev "$BEHAVE_CASE_EXCLUDE"
+}
+
+
+echo_test_cases() {
+ case_arry=`get_test_case_array`
+ echo "Index|File Name|Description"
+ index=1
+ for f in ${case_arry[@]};do
+ desc=`awk -F: '/Feature/{print $2}' $BEHAVE_CASE_DIR/$f`
+ printf "%3s %-40s %-60s\n" $index $f "$desc"
+ index=$(($index+1))
+ done
+ printf "%3s %-40s %-60s\n" $index "regression test" "Original regression test"
+}
+
+
+usage_and_exit() {
+ prog=`basename $0`
+ cat <<END
+Usage: $prog [OPTIONS]|[TESTCASE INDEX]
+$prog is a tool for developers to set up a cluster in containers and run functional tests.
+The container image ships with preinstalled packages of the cluster stack, including pacemaker/corosync/crmsh and many others (see DOCKER_IMAGE).
+Users can change the code under crmsh.git, including test cases. This tool picks up the code changes and runs "make install" in all running containers.
+
+OPTIONS:
+ -h, --help Show this help message and exit
+ -l List existing functional test cases and exit
+ -n NUM Only set up a cluster with NUM nodes (containers)
+ -x Don't configure corosync on the containers (use with -n)
+ -d Clean up the cluster containers
+ -u Create normal users and an Azure-like ssh environment
+ -q Create a qnetd node (use with -n and -x)
+
+EXAMPLES:
+To launch 2 nodes with a running cluster using a very basic corosync.conf
+# crmsh.git/test/run-functional-tests -n 2
+
+To launch 2 nodes without the cluster stack running, to play with "crm cluster init/join"
+# crmsh.git/test/run-functional-tests -n 2 -x
+
+To launch 2 nodes without the cluster stack running, plus a qnetd node (named 'qnetd-node')
+# crmsh.git/test/run-functional-tests -n 2 -x -q
+
+To list the existing test cases. Users can add their own test cases.
+# crmsh.git/test/run-functional-tests -l
+
+To run one or more functional test cases
+# crmsh.git/test/run-functional-tests 1
+# crmsh.git/test/run-functional-tests 1 2 3
+
+To clean up all containers generated by this tool
+# crmsh.git/test/run-functional-tests -d
+END
+ exit 1
+}
+
+
+docker_exec() {
+ name=$1
+ cmd=$2
+ docker exec -t $name /bin/sh -c "$cmd"
+}
+
+set_sshd_config_like_in_azure() {
+ node_name=$1
+ docker_exec $node_name "echo \"$SSHD_CONFIG_AZURE\" > /etc/ssh/sshd_config"
+ docker_exec $node_name "systemctl restart sshd.service"
+}
+
+create_custom_user() {
+ user_name=$1
+ user_id=$2
+ docker_exec $node_name "useradd -m -s /bin/bash ${user_name} 2>/dev/null"
+ docker_exec $node_name "chmod u+w /etc/sudoers"
+ docker_exec $node_name "echo \"${user_name} ALL=(ALL) NOPASSWD: ALL\" >> /etc/sudoers"
+ docker_exec $node_name "chmod u-w /etc/sudoers"
+ docker_exec $node_name "echo 'export PATH=\$PATH:/usr/sbin/' >> ~${user_name}/.bashrc"
+ docker_exec $node_name "echo -e \"linux\\nlinux\" | passwd ${user_name} 2>/dev/null"
+ docker_exec $node_name "cp -r /root/.ssh ~${user_name}/ && chown ${user_name}:haclient -R ~${user_name}/.ssh"
+ info "Create user '$user_name' on $node_name"
+}
+
+create_alice_bob_carol() {
+ # The custom users alice, bob and carol are as important as root,
+ # and they should eventually be baked into the docker image.
+ # For now, create them here.
+ create_custom_user "alice" "1000"
+ create_custom_user "bob" "1001"
+ create_custom_user "carol" "1002"
+}
+
+deploy_ha_node() {
+ node_name=$1
+ docker_options="-d --name $node_name -h $node_name --privileged --shm-size 1g"
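+ # make_cmd builds and installs crmsh from the source tree copied into the container.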
+ make_cmd="cd $PROJECT_INSIDE;./autogen.sh && ./configure --prefix /usr && make install && make install-crmconfDATA prefix= && cp /usr/bin/crm /usr/sbin"
+
+ info "Deploying \"$node_name\"..."
+ docker run --restart always $docker_options $DOCKER_IMAGE &> /dev/null
+ for network in ${HA_NETWORK_ARRAY[@]};do
+ docker network connect $network $node_name
+ done
+
+ if [ "$node_name" != "qnetd-node" ];then
+ rm_qnetd_cmd="rpm -q corosync-qnetd && rpm -e corosync-qnetd"
+ docker_exec $node_name "$rm_qnetd_cmd" &> /dev/null
+ fi
+ docker_exec $node_name "rm -rf /run/nologin"
+ docker_exec $node_name "echo 'StrictHostKeyChecking no' >> /etc/ssh/ssh_config"
+
+ if [ "$node_name" != "qnetd-node" ];then
+ docker cp $PROJECT_PATH $node_name:/opt/crmsh
+ info "Building crmsh on \"$node_name\"..."
+ docker_exec $node_name "$make_cmd" 1> /dev/null || \
+ fatal "Building failed on $node_name!"
+ docker_exec $node_name "chown hacluster:haclient -R /var/log/crmsh"
+ docker_exec $node_name "chmod g+w -R /var/log/crmsh"
+ create_alice_bob_carol
+ if [ "$NORMAL_USER_FLAG" -eq 1 ];then
+ set_sshd_config_like_in_azure $node_name
+ fi
+ else
+ docker_exec $node_name "useradd -m -s /bin/bash alice 2>/dev/null"
+ docker_exec $node_name "echo \"alice ALL=(ALL) NOPASSWD:ALL\" > /etc/sudoers.d/alice"
+ docker_exec $node_name "cp -r /root/.ssh ~alice/ && chown alice:users -R ~alice/.ssh"
+ info "Create user 'alice' on $node_name"
+ [ "$NORMAL_USER_FLAG" -eq 1 ] && set_sshd_config_like_in_azure $node_name
+ fi
+}
+
+
+create_node() {
+ info "Loading docker image $DOCKER_IMAGE..."
+ docker pull $DOCKER_IMAGE &> /dev/null
+
+ for index in ${!HA_NETWORK_ARRAY[@]};do
+ network=${HA_NETWORK_ARRAY[$index]}
+ info "Create ha specific docker network \"$network\"..."
+ docker network create --ipv6 --subnet ${HA_NETWORK_V6_ARRAY[$index]} $network &> /dev/null
+ done
+
+ for node in $*;do
+ deploy_ha_node $node &
+ done
+ wait
+}
+
+
+config_cluster() {
+ node_num=$#
+ insert_str=""
+ container_ip_array=(`docker network inspect $HA_NETWORK_ARRAY -f '{{range .Containers}}{{printf "%s " .IPv4Address}}{{end}}'`)
+
+ for i in $(seq $node_num -1 1);do
+ ip=`echo ${container_ip_array[$((i-1))]}|awk -F/ '{print $1}'`
+ insert_str+="\\n\\tnode {\n\t\tring0_addr: $ip\n\t\tnodeid: $i\n\t}"
+ done
+ corosync_conf_str=$(sed "/nodelist/a \\${insert_str}" <(echo "$COROSYNC_CONF_TEMPLATE"))
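+ # For a 2-node cluster, votequorum needs "two_node: 1" so quorum is
+ # retained when one of the two nodes goes down.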
+ if [ $node_num -eq 2 ];then
+ corosync_conf_str=$(sed "/corosync_votequorum/a \\\\ttwo_node: 1" <(echo "$corosync_conf_str"))
+ fi
+
+ info "Copy corosync.conf to $*"
+ for node in $*;do
+ if [ $node == $1 ];then
+ docker_exec $1 "echo \"$corosync_conf_str\" >> $COROSYNC_CONF"
+ docker_exec $1 "corosync-keygen -l -k $COROSYNC_AUTH &> /dev/null"
+ else
+ while :
+ do
+ docker_exec $1 "ssh -T -o Batchmode=yes $node true &> /dev/null" && break
+ sleep 1
+ done
+ docker_exec $1 "scp -p $COROSYNC_CONF $COROSYNC_AUTH $node:/etc/corosync &> /dev/null"
+ fi
+ done
+}
+
+
+start_cluster() {
+ for node in $*;do
+ docker_exec $node "crm cluster enable && crm cluster start" 1> /dev/null
+ if [ "$?" -eq 0 ];then
+ info "Cluster service started on \"$node\""
+ else
+ fatal "Failed to start cluster service on \"$node\""
+ fi
+ done
+}
+
+
+container_already_exists() {
+ docker ps -a|grep -q "$1"
+ if [ "$?" -eq 0 ];then
+ fatal "Container \"$1\" already running"
+ fi
+}
+
+
+setup_cluster() {
+ hanodes_arry=()
+ is_number $1
+ if [ "$?" -eq 0 ];then
+ for i in $(seq 1 $1);do
+ hanodes_arry+=("hanode$i")
+ done
+ else
+ hanodes_arry=($*)
+ fi
+
+ if [ $WITH_QNETD_NODE -eq 1 ];then
+ create_node ${hanodes_arry[@]} "qnetd-node"
+ else
+ create_node ${hanodes_arry[@]}
+ fi
+
+ [ "$CONFIG_COROSYNC_FLAG" -eq 0 ] && return
+ config_cluster ${hanodes_arry[@]}
+ start_cluster ${hanodes_arry[@]}
+ docker_exec "hanode1" "crm configure property stonith-enabled=false"
+}
+
+
+cleanup_container() {
+ node=$1
+ info "Cleanup container \"$node\"..."
+ docker container stop $node &> /dev/null
+ docker container rm $node &> /dev/null
+}
+
+
+cleanup_cluster() {
+ exist_network_array=()
+ for network in ${HA_NETWORK_ARRAY[@]};do
+ docker network ls|grep -q $network && exist_network_array+=($network)
+ done
+ if [ ${#exist_network_array[@]} -eq 0 ];then
+ info "Already cleaned up"
+ return 0
+ fi
+
+ container_array=(`docker network inspect $exist_network_array -f '{{range .Containers}}{{printf "%s " .Name}}{{end}}'`)
+ for node in ${container_array[@]};do
+ cleanup_container $node &
+ done
+ wait
+
+ for network in ${exist_network_array[@]};do
+ info "Cleanup ha specific docker network \"$network\"..."
+ docker network rm $network &> /dev/null
+ done
+}
+
+
+adjust_test_case() {
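+ # Replace placeholders such as "@hanode1.ip.default" or "@qnetd-node.ip6.0"
+ # in the feature file with the container's actual IPv4/IPv6 address, and
+ # "@vip.N" with a virtual IP derived from the first HA network.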
+ node_name=$1
+ replace_arry=(`docker_exec $node_name "grep -o -E '@(hanode[0-9]+|qnetd-node)\.ip[6]?\.(default|[0-9])' $2|sort -u|dos2unix"`)
+ for item in ${replace_arry[@]};do
+ item_str=${item##@}
+ node=`echo $item_str|cut -d "." -f 1`
+ ip_version=`echo $item_str|cut -d "." -f 2|tr -d "\r"`
+ ip_search_str="IPAddress"
+ if [ "$ip_version" == "ip6" ];then
+ ip_search_str="GlobalIPv6Address"
+ fi
+ index=`echo $item_str|cut -d "." -f 3|tr -d "\r"`
+ if [ "$index" == "default" ];then
+ ip=`docker container inspect $node -f "{{range .NetworkSettings.Networks}}{{printf \"%s \" .$ip_search_str}}{{end}}"|awk '{print $1}'|tr -d "\r"`
+ else
+ ip=`docker container inspect $node -f "{{.NetworkSettings.Networks.${HA_NETWORK_ARRAY[$index]}.$ip_search_str}}"|tr -d "\r"`
+ fi
+ item=`echo $item|tr -d "\r"`
+ docker_exec $node_name "sed -i s/$item/$ip/g $2"
+ done
+
+ vip_replace_array=(`docker_exec $node_name "grep -o -E '@vip\.[0-9]' $2|sort -u|dos2unix"`)
+ for item in ${vip_replace_array[@]};do
+ index=`echo $item|cut -d "." -f 2|tr -d "\r"`
+ suffix=$((123+index))
+ ip=`docker container inspect $node_name -f "{{.NetworkSettings.Networks.${HA_NETWORK_ARRAY[0]}.IPAddress}}"|tr -d "\r"`
+ vip=`echo $ip|sed "s/\.[0-9][0-9]*$/\.$suffix/g"|tr -d "\r"`
+ item=`echo $item|tr -d "\r"`
+ docker_exec $node_name "sed -i s/$item/$vip/g $2"
+ done
+}
+
+
+run_origin_regression_test() {
+ CONFIG_COROSYNC_FLAG=0
+ setup_cluster "hanode1"
+ docker_exec "hanode1" "sh /usr/share/crmsh/tests/regression.sh"
+ return $?
+}
+
+
+prepare_coverage_env() {
+ for node in $*; do
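+ # Inject a coverage bootstrap right after the shebang of /usr/sbin/crm:
+ # it starts coverage.py with the repo's coveragerc and registers an
+ # atexit hook that stops and saves the coverage data when crm exits.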
+ docker exec -t $node /bin/sh -c 'sed -i '\''1a\import coverage\nimport atexit\ncov=coverage.Coverage(config_file="/opt/crmsh/test/features/coveragerc")\natexit.register(lambda:(cov.stop(),cov.save()))\ncov.start()'\'' /usr/sbin/crm'
+ done
+}
+
+
+fetch_coverage_report() {
+ local unique_id=$(dd if=/dev/urandom count=1 bs=6 | base64 | tr '+/' '-_')
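+ # The random suffix keeps reports from different runs from overwriting each other.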
+ for node in $*; do
+ docker_exec "$node" "coverage combine; coverage xml -o /opt/coverage.xml"
+ # see https://github.com/codecov/codecov-cli/blob/master/codecov_cli/services/upload/coverage_file_finder.py
+ docker cp "$node":/opt/coverage.xml coverage."$unique_id"."$node".xml
+ done
+}
+
+
+WITH_QNETD_NODE=0
+NORMAL_USER_FLAG=0
+CONFIG_COROSYNC_FLAG=1
+SETUP_N_NODES_CLUSTER=0
+options=$(getopt -l "help" -o "hldxuqn:" -- "$@")
+eval set -- "$options"
+while true;do
+case $1 in
+-h|--help) usage_and_exit;;
+-l)
+ echo_test_cases
+ exit 0
+ ;;
+-d)
+ check_docker_env cleanup
+ cleanup_cluster
+ exit $?
+ ;;
+-x)
+ CONFIG_COROSYNC_FLAG=0
+ shift
+ ;;
+-u)
+ NORMAL_USER_FLAG=1
+ shift
+ ;;
+-q)
+ WITH_QNETD_NODE=1
+ shift
+ ;;
+-n)
+ check_docker_env
+ shift
+ is_number $1 || fatal "-n option need a number larger than 0"
+ SETUP_N_NODES_CLUSTER=$1
+ shift
+ ;;
+--)
+ shift
+ break
+ ;;
+esac
+done
+
+if [ $SETUP_N_NODES_CLUSTER -ge 1 ];then
+ setup_cluster $SETUP_N_NODES_CLUSTER
+ exit $?
+fi
+
+if [ "$#" -eq 0 ];then
+ usage_and_exit
+fi
+
+# used by github action
+if [ "$1" == "_get_index_of" ];then
+ shift
+ pattern=""
+ for item in $*;do
+ pattern+="${item}|"
+ done
+ echo_test_cases|grep -E "(${pattern%%|})(\.feature|\s+Original regression test)"|awk '{print $1}'|tr -s '\n' ' '
+ exit 0
+fi
+
+for case_num in $*;do
+ echo_test_cases|grep -E "\s+$case_num\s" &> /dev/null
+ if [ "$?" -ne 0 ];then
+ error "\"$case_num\" is an invalid index"
+ echo_test_cases
+ exit 1
+ fi
+done
+
+for case_num in $*;do
+ if [ "$case_num" -ne $1 ];then
+ check_docker_env cleanup
+ cleanup_cluster
+ echo
+ fi
+ check_docker_env
+ test_case_array=(`get_test_case_array`)
+ if [ $case_num -gt ${#test_case_array[*]} ];then
+ run_origin_regression_test || exit 1
+ continue
+ fi
+ case_file=$BEHAVE_CASE_DIR/${test_case_array[$((case_num-1))]}
+ case_file_in_container="$PROJECT_INSIDE/test/features/`basename $case_file`"
+ node_arry=(`awk -F: '/Need nodes/{print $2}' $case_file`)
+ CONFIG_COROSYNC_FLAG=0
+ setup_cluster ${node_arry[@]}
+ adjust_test_case ${node_arry[0]} $case_file_in_container
+ echo
+ prepare_coverage_env "${node_arry[@]}"
+ if [ "$NORMAL_USER_FLAG" -eq 0 ];then
+ info "Running \"$case_file_in_container\" under 'root'..."
+ docker_exec ${node_arry[0]} "behave --no-logcapture $case_file_in_container || exit 1" || exit 1
+ else
+ info "Running \"$case_file_in_container\" under normal user 'alice'..."
+ docker_exec ${node_arry[0]} "su - alice -c 'sudo behave --no-logcapture $case_file_in_container || exit 1'" || exit 1
+ fi
+ fetch_coverage_report "${node_arry[@]}"
+ echo
+done
diff --git a/test/testcases/acl b/test/testcases/acl
new file mode 100644
index 0000000..ebc9531
--- /dev/null
+++ b/test/testcases/acl
@@ -0,0 +1,60 @@
+show ACL
+node node1
+property enable-acl=true
+primitive st stonith:ssh \
+ params hostlist='node1' \
+ meta target-role="Started" requires=nothing \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive d0 ocf:pacemaker:Dummy
+primitive d1 ocf:pacemaker:Dummy
+role basic-read \
+ read status \
+ read type:node attribute:uname \
+ read type:node attribute:type \
+ read property
+role basic-read-basic \
+ read cib
+role d0-admin \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0
+role silly-role \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0 \
+ read status \
+ read type:node attribute:uname \
+ read type:node attribute:type \
+ read utilization:d0 \
+ read property:stonith-enabled \
+ write property \
+ read node \
+ read node:node1 \
+ read nodeattr \
+ read nodeattr:a1 \
+ read nodeutil \
+ read nodeutil:node1 \
+ read status \
+ read cib
+role silly-role-two \
+ read xpath:"//nodes//attributes" \
+ deny tag:nvpair \
+ deny ref:d0
+acl_target alice \
+ basic-read-basic
+acl_target bob \
+ d0-admin \
+ basic-read-basic
+role cyrus-role \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0 \
+ read status \
+ read type:node attribute:uname \
+ read type:node attribute:type \
+ read property
+acl_target cyrus cyrus-role
+_test
+verify
+.
diff --git a/test/testcases/acl.excl b/test/testcases/acl.excl
new file mode 100644
index 0000000..31d13f7
--- /dev/null
+++ b/test/testcases/acl.excl
@@ -0,0 +1 @@
+INFO: 5: already using schema pacemaker-1.2
diff --git a/test/testcases/acl.exp b/test/testcases/acl.exp
new file mode 100644
index 0000000..f00405c
--- /dev/null
+++ b/test/testcases/acl.exp
@@ -0,0 +1,94 @@
+.TRY ACL
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: property enable-acl=true
+.INP: primitive st stonith:ssh params hostlist='node1' meta target-role="Started" requires=nothing op start timeout=60s op monitor interval=60m timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive d0 ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.INP: primitive d1 ocf:pacemaker:Dummy
+.INP: role basic-read read status read type:node attribute:uname read type:node attribute:type read property
+.INP: role basic-read-basic read cib
+.INP: role d0-admin write meta:d0:target-role write meta:d0:is-managed read ref:d0
+.INP: role silly-role write meta:d0:target-role write meta:d0:is-managed read ref:d0 read status read type:node attribute:uname read type:node attribute:type read utilization:d0 read property:stonith-enabled write property read node read node:node1 read nodeattr read nodeattr:a1 read nodeutil read nodeutil:node1 read status read cib
+.INP: role silly-role-two read xpath:"//nodes//attributes" deny tag:nvpair deny ref:d0
+.INP: acl_target alice basic-read-basic
+.INP: acl_target bob d0-admin basic-read-basic
+.INP: role cyrus-role write meta:d0:target-role write meta:d0:is-managed read ref:d0 read status read type:node attribute:uname read type:node attribute:type read property
+.INP: acl_target cyrus cyrus-role
+.INP: _test
+.INP: verify
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.INP: show
+node node1
+primitive d0 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started requires=nothing \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+property cib-bootstrap-options: \
+ enable-acl=true
+role basic-read \
+ read status \
+ read attr:uname type:node \
+ read attr:type type:node \
+ read property
+role basic-read-basic \
+ read cib
+role cyrus-role \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0 \
+ read status \
+ read attr:uname type:node \
+ read attr:type type:node \
+ read property
+role d0-admin \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0
+role silly-role \
+ write meta:d0:target-role \
+ write meta:d0:is-managed \
+ read ref:d0 \
+ read status \
+ read attr:uname type:node \
+ read attr:type type:node \
+ read utilization:d0 \
+ read property:stonith-enabled \
+ write property \
+ read node \
+ read node:node1 \
+ read nodeattr \
+ read nodeattr:a1 \
+ read nodeutil \
+ read nodeutil:node1 \
+ read status \
+ read cib
+role silly-role-two \
+ read xpath:"//nodes//attributes" \
+ deny type:nvpair \
+ deny ref:d0
+acl_target alice \
+ basic-read-basic
+acl_target bob \
+ d0-admin \
+ basic-read-basic
+acl_target cyrus \
+ cyrus-role
+.INP: commit
diff --git a/test/testcases/basicset b/test/testcases/basicset
new file mode 100644
index 0000000..4f023bf
--- /dev/null
+++ b/test/testcases/basicset
@@ -0,0 +1,18 @@
+confbasic
+bundle
+confbasic-xml
+edit
+rset
+rset-xml
+delete
+node
+resource
+file
+shadow
+ra
+acl
+history
+newfeatures
+commit
+bugs
+scripts
diff --git a/test/testcases/bugs b/test/testcases/bugs
new file mode 100644
index 0000000..28219ae
--- /dev/null
+++ b/test/testcases/bugs
@@ -0,0 +1,79 @@
+session Configuration bugs
+options
+sort_elements false
+up
+configure
+erase
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m
+primitive p4 Dummy
+primitive p3 Dummy
+primitive p2 Dummy
+primitive p1 Dummy
+colocation c1 inf: p1 p2
+filter "sed 's/p1 p2/& p3/'" c1
+show c1
+delete c1
+colocation c2 inf: [ p1 p2 ] p3 p4
+filter "sed 's/\\\[/\\\(/;s/\\\]/\\\)/'" c2
+show c2
+primitive p5 Dummy
+primitive p6 Dummy
+clone cl-p5 p5
+show
+commit
+_test
+verify
+show
+.
+session Unordered load file
+options
+sort_elements false
+up
+configure
+load update bugs-test.txt
+show
+commit
+_test
+verify
+.
+session Unknown properties
+configure
+erase
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m
+property SAPHanaSR: \
+ hana_ha1_site_lss_WDF1=4
+show
+commit
+_test
+verify
+property SAPHanaSR_2: \
+ hana_ha1_site_iss_WDF1=cde \
+ hana_ha1_site_bss_WDF1=abc
+show
+commit
+_test
+verify
+.
+session template
+configure
+erase
+node node1
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m
+template
+new vip virtual-ip params ip=10.10.10.123
+load vip
+apply update
+up
+commit
+_test
+verify
+.
diff --git a/test/testcases/bugs.exp b/test/testcases/bugs.exp
new file mode 100644
index 0000000..af05e82
--- /dev/null
+++ b/test/testcases/bugs.exp
@@ -0,0 +1,215 @@
+.TRY Configuration bugs
+.INP: options
+.INP: sort_elements false
+WARNING: 2: This command 'sort_elements' is deprecated, please use 'sort-elements'
+INFO: 2: "sort_elements" is accepted as "sort-elements"
+.INP: up
+.INP: configure
+.INP: erase
+.INP: primitive st stonith:null params hostlist='node1' meta description="some description here" requires=nothing op monitor interval=60m
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: primitive p4 Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: primitive p3 Dummy
+.INP: primitive p2 Dummy
+.INP: primitive p1 Dummy
+.INP: colocation c1 inf: p1 p2
+.INP: filter "sed 's/p1 p2/& p3/'" c1
+.INP: show c1
+colocation c1 inf: p1 p2 p3
+.INP: delete c1
+.INP: colocation c2 inf: [ p1 p2 ] p3 p4
+.INP: filter "sed 's/\[/\(/;s/\]/\)/'" c2
+.INP: show c2
+colocation c2 inf: ( p1 p2 ) p3 p4
+.INP: primitive p5 Dummy
+.INP: primitive p6 Dummy
+.INP: clone cl-p5 p5
+.INP: show
+node node1
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+primitive p4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+clone cl-p5 p5 \
+ meta interleave=true
+colocation c2 inf: ( p1 p2 ) p3 p4
+.INP: commit
+.INP: _test
+.INP: verify
+.INP: show
+node node1
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+primitive p4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+clone cl-p5 p5 \
+ meta interleave=true
+colocation c2 inf: ( p1 p2 ) p3 p4
+.TRY Unordered load file
+.INP: options
+.INP: sort_elements false
+WARNING: 2: This command 'sort_elements' is deprecated, please use 'sort-elements'
+INFO: 2: "sort_elements" is accepted as "sort-elements"
+.INP: up
+.INP: configure
+.INP: load update bugs-test.txt
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: show
+node node1
+primitive st stonith:null \
+ params hostlist=node1
+primitive p4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive gr1 Dummy
+primitive gr2 Dummy
+primitive gr3 Dummy
+primitive gr4 Dummy
+group g1 gr1 gr2
+group g2 gr3
+group g3 gr4
+clone cl-p5 p5 \
+ meta interleave=true
+colocation c2 inf: ( p1 p2 ) p3 p4
+location loc1 g1 \
+ rule 200: #uname eq node1
+op_defaults op-options: \
+ timeout=60s
+.INP: commit
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: _test
+.INP: verify
+.TRY Unknown properties
+.INP: configure
+.INP: erase
+INFO: 2: constraint colocation:c2 updated
+INFO: 2: constraint colocation:c2 updated
+INFO: 2: modified location:loc1 from g1 to gr2
+.INP: primitive st stonith:null params hostlist='node1' meta description="some description here" requires=nothing op monitor interval=60m
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: property SAPHanaSR: hana_ha1_site_lss_WDF1=4
+.INP: show
+node node1
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+property SAPHanaSR: \
+ hana_ha1_site_lss_WDF1=4
+.INP: commit
+.INP: _test
+.INP: verify
+.INP: property SAPHanaSR_2: hana_ha1_site_iss_WDF1=cde hana_ha1_site_bss_WDF1=abc
+.INP: show
+node node1
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+property SAPHanaSR: \
+ hana_ha1_site_lss_WDF1=4
+property SAPHanaSR_2: \
+ hana_ha1_site_iss_WDF1=cde \
+ hana_ha1_site_bss_WDF1=abc
+.INP: commit
+.INP: _test
+.INP: verify
+.TRY template
+.INP: configure
+.INP: erase
+.INP: node node1
+.INP: primitive st stonith:null params hostlist='node1' meta description="some description here" requires=nothing op monitor interval=60m
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: template
+.INP: new vip virtual-ip params ip=10.10.10.123
+INFO: 6: pulling in template virtual-ip
+.INP: load vip
+.INP: apply update
+.EXT crm_resource --show-metadata ocf:heartbeat:IPaddr
+.EXT crm_resource --list-ocf-alternatives IPaddr
+.INP: up
+.INP: commit
+.INP: _test
+.INP: verify
diff --git a/test/testcases/bundle b/test/testcases/bundle
new file mode 100644
index 0000000..463687d
--- /dev/null
+++ b/test/testcases/bundle
@@ -0,0 +1,20 @@
+show Basic configure
+node node1
+delete node1
+node node1 \
+ attributes mem=16G
+node node2 utilization cpu=4
+primitive st stonith:ssh \
+ params hostlist='node1 node2' \
+ meta target-role="Started" requires=nothing \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive st2 stonith:ssh \
+ params hostlist='node1 node2'
+bundle id=bundle-test1 docker image=test network ip-range-start=10.10.10.123 port-mapping id=port1 port=80 storage storage-mapping id=storage1 target-dir=test source-dir=test meta target-role=Stopped
+primitive id=dummy ocf:heartbeat:Dummy op monitor interval=10 meta target-role=Stopped
+bundle id=bundle-test2 docker image=test network ip-range-start=10.10.10.123 primitive dummy meta target-role=Stopped priority=1
+property stonith-enabled=true
+_test
+verify
+.
diff --git a/test/testcases/bundle.exp b/test/testcases/bundle.exp
new file mode 100644
index 0000000..f6284ce
--- /dev/null
+++ b/test/testcases/bundle.exp
@@ -0,0 +1,57 @@
+.TRY Basic configure
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: delete node1
+.INP: node node1 attributes mem=16G
+.INP: node node2 utilization cpu=4
+.INP: primitive st stonith:ssh params hostlist='node1 node2' meta target-role="Started" requires=nothing op start timeout=60s op monitor interval=60m timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive st2 stonith:ssh params hostlist='node1 node2'
+.INP: bundle id=bundle-test1 docker image=test network ip-range-start=10.10.10.123 port-mapping id=port1 port=80 storage storage-mapping id=storage1 target-dir=test source-dir=test meta target-role=Stopped
+.INP: primitive id=dummy ocf:heartbeat:Dummy op monitor interval=10 meta target-role=Stopped
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: bundle id=bundle-test2 docker image=test network ip-range-start=10.10.10.123 primitive dummy meta target-role=Stopped priority=1
+.INP: property stonith-enabled=true
+.INP: _test
+.INP: verify
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.INP: show
+node node1 \
+ attributes mem=16G
+node node2 \
+ utilization cpu=4
+primitive dummy Dummy \
+ meta target-role=Stopped \
+ op monitor interval=10 timeout=20s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist="node1 node2" \
+ meta target-role=Started requires=nothing \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+primitive st2 stonith:ssh \
+ params hostlist="node1 node2" \
+ op monitor timeout=20 interval=3600 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+bundle bundle-test1 \
+ docker image=test \
+ network ip-range-start=10.10.10.123 port-mapping id=port1 port=80 \
+ storage storage-mapping id=storage1 target-dir=test source-dir=test \
+ meta target-role=Stopped
+bundle bundle-test2 \
+ docker image=test \
+ network ip-range-start=10.10.10.123 \
+ primitive dummy \
+ meta target-role=Stopped priority=1
+property cib-bootstrap-options: \
+ stonith-enabled=true
+.INP: commit
diff --git a/test/testcases/commit b/test/testcases/commit
new file mode 100644
index 0000000..67b27c3
--- /dev/null
+++ b/test/testcases/commit
@@ -0,0 +1,39 @@
+show Commits of all kinds
+op_defaults timeout=2m
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta yoyo-meta="yoyo 2" requires=nothing \
+ op monitor interval=60m
+commit
+node node1 \
+ attributes mem=16G
+primitive p1 ocf:heartbeat:Dummy \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10
+primitive p2 ocf:heartbeat:Dummy
+primitive p3 ocf:heartbeat:Dummy
+group g1 p1 p2
+clone c1 g1
+location l1 p3 100: node1
+order o1 Mandatory: p3 c1
+colocation cl1 inf: c1 p3
+primitive d1 ocf:heartbeat:Dummy
+primitive d2 ocf:heartbeat:Dummy
+primitive d3 ocf:heartbeat:Dummy
+commit
+rename p3 pp3
+commit
+rename pp3 p3
+delete c1
+commit
+group g2 d1 d2
+commit
+delete g2
+commit
+filter "sed '/g1/s/p1/d1/'"
+group g2 d3 d2
+delete d2
+commit
+_test
+verify
+.
diff --git a/test/testcases/commit.exp b/test/testcases/commit.exp
new file mode 100644
index 0000000..59d291c
--- /dev/null
+++ b/test/testcases/commit.exp
@@ -0,0 +1,90 @@
+.TRY Commits of all kinds
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: op_defaults timeout=2m
+.INP: primitive st stonith:null params hostlist='node1' meta yoyo-meta="yoyo 2" requires=nothing op monitor interval=60m
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: commit
+WARNING: 7: st: unknown attribute 'yoyo-meta'
+.INP: node node1 attributes mem=16G
+.INP: primitive p1 ocf:heartbeat:Dummy op monitor interval=60m op monitor interval=120m OCF_CHECK_LEVEL=10
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: primitive p2 ocf:heartbeat:Dummy
+.INP: primitive p3 ocf:heartbeat:Dummy
+.INP: group g1 p1 p2
+.INP: clone c1 g1
+.INP: location l1 p3 100: node1
+.INP: order o1 Mandatory: p3 c1
+.INP: colocation cl1 inf: c1 p3
+.INP: primitive d1 ocf:heartbeat:Dummy
+.INP: primitive d2 ocf:heartbeat:Dummy
+.INP: primitive d3 ocf:heartbeat:Dummy
+.INP: commit
+.INP: rename p3 pp3
+INFO: 21: modified location:l1 from p3 to pp3
+INFO: 21: modified order:o1 from p3 to pp3
+INFO: 21: modified colocation:cl1 from p3 to pp3
+.INP: commit
+.INP: rename pp3 p3
+INFO: 23: modified location:l1 from pp3 to p3
+INFO: 23: modified order:o1 from pp3 to p3
+INFO: 23: modified colocation:cl1 from pp3 to p3
+.INP: delete c1
+INFO: 24: modified order:o1 from c1 to g1
+INFO: 24: modified colocation:cl1 from c1 to g1
+.INP: commit
+.INP: group g2 d1 d2
+.INP: commit
+.INP: delete g2
+.INP: commit
+.INP: filter "sed '/g1/s/p1/d1/'"
+.INP: group g2 d3 d2
+.INP: delete d2
+.INP: commit
+.INP: _test
+.INP: verify
+WARNING: 35: st: unknown attribute 'yoyo-meta'
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive d1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta yoyo-meta="yoyo 2" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group g1 d1 p2
+group g2 d3
+colocation cl1 inf: g1 p3
+location l1 p3 100: node1
+order o1 Mandatory: p3 g1
+op_defaults op-options: \
+ timeout=2m
+.INP: commit
+INFO: 37: apparently there is nothing to commit
+INFO: 37: try changing something first
diff --git a/test/testcases/common.excl b/test/testcases/common.excl
new file mode 100644
index 0000000..4902553
--- /dev/null
+++ b/test/testcases/common.excl
@@ -0,0 +1,26 @@
+Could not send fail-count-p0=\(null\) update via attrd: connection failed
+Could not send fail-count-p0=<none> update via attrd: connection failed
+Could not send s1=\(null\) update via attrd: connection failed
+Could not send s1=<none> update via attrd: connection failed
+Error performing operation: The object/attribute does not exist
+Error setting fail-count-p0=5 \(section=status, set=status-node1\): The object/attribute does not exist
+Error setting s1=1 2 3 \(section=status, set=status-node1\): The object/attribute does not exist
+Error signing on to the CRMd service
+Error connecting to the controller
+Error performing operation: Transport endpoint is not connected
+Error performing operation: Not connected
+.EXT crm_resource --list-ocf-providers
+.EXT crm_resource --list-ocf-alternatives Delay
+.EXT crm_resource --list-ocf-alternatives Dummy
+^\.EXT crmd version
+^\.EXT cibadmin \-Ql
+^\.EXT crm_verify \-VV \-p
+^\.EXT cibadmin \-p \-P
+^\.EXT crm_diff \-\-help
+^\.EXT crm_diff \-o [^ ]+ \-n \-
+^\.EXT crm_diff \-\-no\-version \-o [^ ]+ \-n \-
+^\.EXT sed ['][^']+
+^\.EXT sed ["][^"]+
+^\.EXT [a-zA-Z]+ validate-all
+^[ ]+File ["][^"]+
+^.*\: ([0-9]+\: )?\(cluster\_status\) warning\: Fencing and resource management disabled due to lack of quorum
diff --git a/test/testcases/common.filter b/test/testcases/common.filter
new file mode 100755
index 0000000..03846c2
--- /dev/null
+++ b/test/testcases/common.filter
@@ -0,0 +1,9 @@
+#!/usr/bin/awk -f
+# 1. replace .EXT [path/]<cmd> <parameter> with .EXT <cmd> <parameter>
+/\.EXT \/(.+)/ { gsub(/\/.*\//, "", $2) }
+/\.EXT >\/dev\/null 2>&1 \/(.+)/ { gsub(/\/.*\//, "", $4) }
+/\.EXT pacemaker-fenced/ { gsub(/pacemaker-fenced/,"stonithd") }
+/\.EXT pacemaker-controld/ { gsub(/pacemaker-controld/,"crmd") }
+/\.EXT pacemaker-schedulerd/ { gsub(/pacemaker-schedulerd/,"pengine") }
+/\.EXT pacemaker-based/ { gsub(/pacemaker-based/,"cib") }
+{ print }
diff --git a/test/testcases/confbasic b/test/testcases/confbasic
new file mode 100644
index 0000000..b06016b
--- /dev/null
+++ b/test/testcases/confbasic
@@ -0,0 +1,91 @@
+show Basic configure
+node node1
+delete node1
+node node1 \
+ attributes mem=16G
+node node2 utilization cpu=4
+primitive st stonith:ssh \
+ params hostlist='node1 node2' \
+ meta target-role="Started" \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive st2 stonith:ssh \
+ params hostlist='node1 node2'
+primitive d1 ocf:pacemaker:Dummy \
+ operations $id=d1-ops \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10
+monitor d1 60s:30s
+primitive d2 ocf:heartbeat:Delay \
+ params mondelay=60 \
+ op start timeout=60s \
+ op stop timeout=60s
+monitor d2:Started 60s:30s
+group g1 d1 d2
+primitive d3 ocf:pacemaker:Dummy
+clone c d3 \
+ meta clone-max=1
+primitive d4 ocf:pacemaker:Dummy
+ms m d4
+delete m
+master m d4
+primitive s5 ocf:pacemaker:Stateful \
+ operations $id-ref=d1-ops
+primitive s6 ocf:pacemaker:Stateful \
+ operations $id-ref=d1
+ms m5 s5
+ms m6 s6
+primitive d7 Dummy \
+ params rule inf: #uname eq node1 fake=1 \
+ params rule inf: #uname eq node2 fake=2 \
+ op start interval=0 timeout=60s \
+ op_params 2: rule #uname eq node1 op_param=dummy \
+ op_params 1: op_param=smart \
+ op_meta 2: rule #ra-version version:gt 1.0 start-delay=120m \
+ op_meta 1: start-delay=60m
+primitive d8 ocf:pacemaker:Dummy
+clone m7 d8 \
+ meta promotable=true \
+ meta promoted-max=1 \
+ meta promoted-node-max=1
+location l1 g1 100: node1
+location l2 c \
+ rule $id=l2-rule1 100: #uname eq node1
+location l3 m5 \
+ rule inf: #uname eq node1 and pingd gt 0
+location l4 m5 \
+ rule -inf: not_defined pingd or pingd lte 0
+location l5 m5 \
+ rule -inf: not_defined pingd or pingd lte 0 \
+ rule inf: #uname eq node1 and pingd gt 0 \
+ rule inf: date lt "2009-05-26" and \
+ date in start="2009-05-26" end="2009-07-26" and \
+ date in start="2009-05-26" years="2009" and \
+ date spec years="2009" hours="09-17"
+location l6 m5 \
+ rule $id-ref=l2-rule1
+location l7 m5 \
+ rule $id-ref=l2
+collocation c1 inf: m6 m5
+collocation c2 inf: m5:Master d1:Started
+order o1 Mandatory: m5 m6
+order o2 Optional: d1:start m5:promote
+order o3 Serialize: m5 m6
+order o4 Mandatory: m5 m6
+rsc_ticket ticket-A_m6 ticket-A: m6
+rsc_ticket ticket-B_m6_m5 ticket-B: m6 m5 loss-policy=fence
+rsc_ticket ticket-C_master ticket-C: m6 m5:Master loss-policy=fence
+fencing_topology st st2
+property stonith-enabled=true
+property $id=cpset2 maintenance-mode=true
+rsc_defaults failure-timeout=10m
+op_defaults $id=opsdef2 rule 100: #uname eq node1 record-pending=true
+tag t1: m5 m6
+set d2.mondelay 45
+_test
+verify
+.
+-F node maintenance node1
+-F resource maintenance g1 off
+-F resource maintenance d1
+-F configure property maintenance-mode=true
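
The testcase files follow a convention that can be read off the fixtures: the first line (`show`, `showxml`, or `session`) names the test, the following lines are crm shell input with backslash continuations, a lone `.` ends the scripted part, and any trailing `-F ...` lines are run as separate single-shot crm invocations (they surface as `.TRY -F ...` in the `.exp` trace). A parsing sketch under those inferred assumptions:

    def parse_testcase(text: str):
        # Format inferred from the fixtures in this patch; the real
        # driver in the crmsh harness may differ in details.
        lines = text.splitlines()
        mode, description = lines[0].split(" ", 1)  # e.g. "show", "Basic configure"
        end = lines.index(".")                      # a lone dot ends the script
        commands, buf = [], ""
        for ln in lines[1:end]:
            buf += ln.rstrip("\\").strip() + " "    # re-join continued lines
            if not ln.endswith("\\"):
                commands.append(buf.strip())
                buf = ""
        single_shots = [ln for ln in lines[end + 1:] if ln.startswith("-F")]
        return mode, description, commands, single_shots

    mode, desc, cmds, shots = parse_testcase(
        "show Basic configure\nnode node1 \\\n attributes mem=16G\n.\n-F node maintenance node1"
    )
    assert cmds == ["node node1 attributes mem=16G"]
    assert shots == ["-F node maintenance node1"]
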
diff --git a/test/testcases/confbasic-xml b/test/testcases/confbasic-xml
new file mode 100644
index 0000000..58433f5
--- /dev/null
+++ b/test/testcases/confbasic-xml
@@ -0,0 +1,72 @@
+showxml Basic configure (xml dump)
+node node1
+delete node1
+node node1 \
+ attributes mem=16G
+node node2 utilization cpu=4
+primitive st stonith:ssh \
+ params hostlist='node1 node2' \
+ meta target-role=Started requires=nothing \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive st2 stonith:ssh \
+ params hostlist='node1 node2'
+primitive d1 ocf:pacemaker:Dummy \
+ operations $id=d1-ops \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10
+monitor d1 60s:30s
+primitive d2 ocf:heartbeat:Delay \
+ params mondelay=60 \
+ op start timeout=60s \
+ op stop timeout=60s
+monitor d2:Started 60s:30s
+group g1 d1 d2
+primitive d3 ocf:pacemaker:Dummy
+clone c d3 \
+ meta clone-max=1
+primitive d4 ocf:pacemaker:Dummy
+ms m d4
+delete m
+master m d4
+primitive s5 ocf:pacemaker:Stateful \
+ operations $id-ref=d1-ops
+primitive s6 ocf:pacemaker:Stateful \
+ operations $id-ref=d1
+ms m5 s5
+ms m6 s6
+location l1 g1 100: node1
+location l2 c \
+ rule $id=l2-rule1 100: #uname eq node1
+location l3 m5 \
+ rule inf: #uname eq node1 and pingd gt 0
+location l4 m5 \
+ rule -inf: not_defined pingd or pingd lte 0
+location l5 m5 \
+ rule -inf: not_defined pingd or pingd lte 0 \
+ rule inf: #uname eq node1 and pingd gt 0 \
+ rule inf: date lt 2009-05-26 and \
+ date in start=2009-05-26 end=2009-07-26 and \
+ date in start=2009-05-26 years=2009 and \
+ date spec years=2009 hours=09-17
+location l6 m5 \
+ rule $id-ref=l2-rule1
+location l7 m5 \
+ rule $id-ref=l2
+collocation c1 inf: m6 m5
+collocation c2 inf: m5:Master d1:Started
+order o1 Mandatory: m5 m6
+order o2 Optional: d1:start m5:promote
+order o3 Serialize: m5 m6
+order o4 Mandatory: m5 m6
+rsc_ticket ticket-A_m6 ticket-A: m6
+rsc_ticket ticket-B_m6_m5 ticket-B: m6 m5 loss-policy=fence
+rsc_ticket ticket-C_master ticket-C: m6 m5:Master loss-policy=fence
+fencing_topology st st2
+property stonith-enabled=true
+property $id=cpset2 maintenance-mode=true
+rsc_defaults failure-timeout=10m
+op_defaults $id=opsdef2 record-pending=true
+_test
+verify
+.
diff --git a/test/testcases/confbasic-xml.exp b/test/testcases/confbasic-xml.exp
new file mode 100644
index 0000000..20892dc
--- /dev/null
+++ b/test/testcases/confbasic-xml.exp
@@ -0,0 +1,206 @@
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair name="stonith-enabled" value="true" id="cib-bootstrap-options-stonith-enabled"/>
+ </cluster_property_set>
+ <cluster_property_set id="cpset2">
+ <nvpair name="maintenance-mode" value="true" id="cpset2-maintenance-mode"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="node1-instance_attributes">
+ <nvpair name="mem" value="16G" id="node1-instance_attributes-mem"/>
+ </instance_attributes>
+ </node>
+ <node uname="node2" id="node2">
+ <utilization id="node2-utilization">
+ <nvpair name="cpu" value="4" id="node2-utilization-cpu"/>
+ </utilization>
+ </node>
+ </nodes>
+ <resources>
+ <primitive id="st" class="stonith" type="ssh">
+ <instance_attributes id="st-instance_attributes">
+ <nvpair name="hostlist" value="node1 node2" id="st-instance_attributes-hostlist"/>
+ </instance_attributes>
+ <meta_attributes id="st-meta_attributes">
+ <nvpair name="target-role" value="Started" id="st-meta_attributes-target-role"/>
+ <nvpair name="requires" value="nothing" id="st-meta_attributes-requires"/>
+ </meta_attributes>
+ <operations>
+ <op name="start" timeout="60s" interval="0s" id="st-start-0s"/>
+ <op name="monitor" interval="60m" timeout="60s" id="st-monitor-60m"/>
+ <op name="stop" timeout="15" interval="0s" id="st-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="st2" class="stonith" type="ssh">
+ <instance_attributes id="st2-instance_attributes">
+ <nvpair name="hostlist" value="node1 node2" id="st2-instance_attributes-hostlist"/>
+ </instance_attributes>
+ <operations>
+ <op name="monitor" timeout="20" interval="3600" id="st2-monitor-3600"/>
+ <op name="start" timeout="20" interval="0s" id="st2-start-0s"/>
+ <op name="stop" timeout="15" interval="0s" id="st2-stop-0s"/>
+ </operations>
+ </primitive>
+ <group id="g1">
+ <primitive id="d1" class="ocf" provider="pacemaker" type="Dummy">
+ <operations id="d1-ops">
+ <op name="monitor" interval="60m" timeout="20s" id="d1-ops-monitor-60m"/>
+ <op name="monitor" interval="120m" timeout="20s" id="d1-ops-monitor-120m">
+ <instance_attributes id="d1-ops-monitor-120m-instance_attributes">
+ <nvpair name="OCF_CHECK_LEVEL" value="10" id="d1-ops-monitor-120m-instance_attributes-OCF_CHECK_LEVEL"/>
+ </instance_attributes>
+ </op>
+ <op name="start" timeout="20s" interval="0s" id="d1-ops-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d1-ops-stop-0s"/>
+ <op name="monitor" interval="60s" timeout="30s" id="d1-monitor-60s"/>
+ </operations>
+ </primitive>
+ <primitive id="d2" class="ocf" provider="heartbeat" type="Delay">
+ <instance_attributes id="d2-instance_attributes">
+ <nvpair name="mondelay" value="60" id="d2-instance_attributes-mondelay"/>
+ </instance_attributes>
+ <operations>
+ <op name="start" timeout="60s" interval="0s" id="d2-start-0s"/>
+ <op name="stop" timeout="60s" interval="0s" id="d2-stop-0s"/>
+ <op name="monitor" timeout="30s" interval="10s" id="d2-monitor-10s"/>
+ <op name="monitor" role="Started" interval="60s" timeout="30s" id="d2-monitor-60s"/>
+ </operations>
+ </primitive>
+ </group>
+ <clone id="c">
+ <meta_attributes id="c-meta_attributes">
+ <nvpair name="clone-max" value="1" id="c-meta_attributes-clone-max"/>
+ <nvpair name="interleave" value="true" id="c-meta_attributes-interleave"/>
+ </meta_attributes>
+ <primitive id="d3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="d3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="d3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d3-stop-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ <master id="m">
+ <primitive id="d4" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="d4-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="d4-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d4-stop-0s"/>
+ </operations>
+ </primitive>
+ </master>
+ <master id="m5">
+ <primitive id="s5" class="ocf" provider="pacemaker" type="Stateful">
+ <operations id-ref="d1-ops">
+ <op name="monitor" timeout="20s" interval="10s" role="Promoted" id="s5-monitor-10s"/>
+ <op name="monitor" timeout="20s" interval="11s" role="Unpromoted" id="s5-monitor-11s"/>
+ <op name="start" timeout="20s" interval="0s" id="s5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="s5-stop-0s"/>
+ <op name="promote" timeout="10s" interval="0s" id="s5-promote-0s"/>
+ <op name="demote" timeout="10s" interval="0s" id="s5-demote-0s"/>
+ </operations>
+ </primitive>
+ </master>
+ <master id="m6">
+ <primitive id="s6" class="ocf" provider="pacemaker" type="Stateful">
+ <operations id-ref="d1-ops">
+ <op name="monitor" timeout="20s" interval="10s" role="Promoted" id="s6-monitor-10s"/>
+ <op name="monitor" timeout="20s" interval="11s" role="Unpromoted" id="s6-monitor-11s"/>
+ <op name="start" timeout="20s" interval="0s" id="s6-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="s6-stop-0s"/>
+ <op name="promote" timeout="10s" interval="0s" id="s6-promote-0s"/>
+ <op name="demote" timeout="10s" interval="0s" id="s6-demote-0s"/>
+ </operations>
+ </primitive>
+ </master>
+ </resources>
+ <constraints>
+ <rsc_location id="l1" rsc="g1" score="100" node="node1"/>
+ <rsc_location id="l2" rsc="c">
+ <rule id="l2-rule1" score="100">
+ <expression operation="eq" attribute="#uname" value="node1" id="l2-rule1-expression"/>
+ </rule>
+ </rsc_location>
+ <rsc_location id="l3" rsc="m5">
+ <rule score="INFINITY" id="l3-rule">
+ <expression operation="eq" attribute="#uname" value="node1" id="l3-rule-expression"/>
+ <expression operation="gt" attribute="pingd" value="0" id="l3-rule-expression-0"/>
+ </rule>
+ </rsc_location>
+ <rsc_location id="l4" rsc="m5">
+ <rule score="-INFINITY" boolean-op="or" id="l4-rule">
+ <expression operation="not_defined" attribute="pingd" id="l4-rule-expression"/>
+ <expression operation="lte" attribute="pingd" value="0" id="l4-rule-expression-0"/>
+ </rule>
+ </rsc_location>
+ <rsc_location id="l5" rsc="m5">
+ <rule score="-INFINITY" boolean-op="or" id="l5-rule">
+ <expression operation="not_defined" attribute="pingd" id="l5-rule-expression"/>
+ <expression operation="lte" attribute="pingd" value="0" id="l5-rule-expression-0"/>
+ </rule>
+ <rule score="INFINITY" id="l5-rule-0">
+ <expression operation="eq" attribute="#uname" value="node1" id="l5-rule-0-expression"/>
+ <expression operation="gt" attribute="pingd" value="0" id="l5-rule-0-expression-0"/>
+ </rule>
+ <rule score="INFINITY" id="l5-rule-1">
+ <date_expression operation="lt" end="2009-05-26" id="l5-rule-1-expression"/>
+ <date_expression operation="in_range" start="2009-05-26" end="2009-07-26" id="l5-rule-1-expression-0"/>
+ <date_expression operation="in_range" start="2009-05-26" id="l5-rule-1-expression-1">
+ <duration years="2009" id="l5-rule-1-expression-1-duration"/>
+ </date_expression>
+ <date_expression operation="date_spec" id="l5-rule-1-expression-2">
+ <date_spec years="2009" hours="09-17" id="l5-rule-1-expression-2-date_spec"/>
+ </date_expression>
+ </rule>
+ </rsc_location>
+ <rsc_location id="l6" rsc="m5">
+ <rule id-ref="l2-rule1"/>
+ </rsc_location>
+ <rsc_location id="l7" rsc="m5">
+ <rule id-ref="l2-rule1"/>
+ </rsc_location>
+ <rsc_colocation id="c1" score="INFINITY" rsc="m6" with-rsc="m5"/>
+ <rsc_colocation id="c2" score="INFINITY" rsc="m5" rsc-role="Master" with-rsc="d1" with-rsc-role="Started"/>
+ <rsc_order id="o1" kind="Mandatory" first="m5" then="m6"/>
+ <rsc_order id="o2" kind="Optional" first="d1" first-action="start" then="m5" then-action="promote"/>
+ <rsc_order id="o3" kind="Serialize" first="m5" then="m6"/>
+ <rsc_order id="o4" kind="Mandatory" first="m5" then="m6"/>
+ <rsc_ticket id="ticket-A_m6" ticket="ticket-A" rsc="m6"/>
+ <rsc_ticket id="ticket-B_m6_m5" ticket="ticket-B" loss-policy="fence">
+ <resource_set id="ticket-B_m6_m5-0">
+ <resource_ref id="m6"/>
+ <resource_ref id="m5"/>
+ </resource_set>
+ </rsc_ticket>
+ <rsc_ticket id="ticket-C_master" ticket="ticket-C" loss-policy="fence">
+ <resource_set id="ticket-C_master-0">
+ <resource_ref id="m6"/>
+ </resource_set>
+ <resource_set role="Master" id="ticket-C_master-1">
+ <resource_ref id="m5"/>
+ </resource_set>
+ </rsc_ticket>
+ </constraints>
+ <fencing-topology>
+ <fencing-level target="node1" index="1" devices="st" id="fencing"/>
+ <fencing-level target="node1" index="2" devices="st2" id="fencing-0"/>
+ <fencing-level target="node2" index="1" devices="st" id="fencing-1"/>
+ <fencing-level target="node2" index="2" devices="st2" id="fencing-2"/>
+ </fencing-topology>
+ <rsc_defaults>
+ <meta_attributes id="rsc-options">
+ <nvpair name="failure-timeout" value="10m" id="rsc-options-failure-timeout"/>
+ </meta_attributes>
+ </rsc_defaults>
+ <op_defaults>
+ <meta_attributes id="opsdef2">
+ <nvpair name="record-pending" value="true" id="opsdef2-record-pending"/>
+ </meta_attributes>
+ </op_defaults>
+ </configuration>
+</cib>
diff --git a/test/testcases/confbasic-xml.filter b/test/testcases/confbasic-xml.filter
new file mode 100755
index 0000000..7b677da
--- /dev/null
+++ b/test/testcases/confbasic-xml.filter
@@ -0,0 +1,2 @@
+#!/bin/bash
+grep -v "WARNING"
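
Unlike common.filter, the XML variant only needs to discard WARNING lines (for example the `ms` deprecation notices seen in confbasic.exp), which would otherwise make the XML dump comparison unstable. The equivalent in Python, as a sketch:

    def drop_warnings(lines):
        # Same effect as `grep -v "WARNING"` in confbasic-xml.filter.
        return [ln for ln in lines if "WARNING" not in ln]
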
diff --git a/test/testcases/confbasic.exp b/test/testcases/confbasic.exp
new file mode 100644
index 0000000..5fc2dff
--- /dev/null
+++ b/test/testcases/confbasic.exp
@@ -0,0 +1,199 @@
+.TRY Basic configure
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: delete node1
+.INP: node node1 attributes mem=16G
+.INP: node node2 utilization cpu=4
+.INP: primitive st stonith:ssh params hostlist='node1 node2' meta target-role="Started" op start timeout=60s op monitor interval=60m timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive st2 stonith:ssh params hostlist='node1 node2'
+.INP: primitive d1 ocf:pacemaker:Dummy operations $id=d1-ops op monitor interval=60m op monitor interval=120m OCF_CHECK_LEVEL=10
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.INP: monitor d1 60s:30s
+.INP: primitive d2 ocf:heartbeat:Delay params mondelay=60 op start timeout=60s op stop timeout=60s
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.INP: monitor d2:Started 60s:30s
+.INP: group g1 d1 d2
+.INP: primitive d3 ocf:pacemaker:Dummy
+.INP: clone c d3 meta clone-max=1
+.INP: primitive d4 ocf:pacemaker:Dummy
+.INP: ms m d4
+WARNING: 19: "ms" is deprecated. Please use "clone m d4 meta promotable=true"
+.INP: delete m
+.INP: master m d4
+WARNING: 21: This command 'master' is deprecated, please use 'ms'
+INFO: 21: "master" is accepted as "ms"
+WARNING: 21: "ms" is deprecated. Please use "clone m d4 meta promotable=true"
+.INP: primitive s5 ocf:pacemaker:Stateful operations $id-ref=d1-ops
+.EXT crm_resource --show-metadata ocf:pacemaker:Stateful
+.INP: primitive s6 ocf:pacemaker:Stateful operations $id-ref=d1
+.INP: ms m5 s5
+WARNING: 24: "ms" is deprecated. Please use "clone m5 s5 meta promotable=true"
+.INP: ms m6 s6
+WARNING: 25: "ms" is deprecated. Please use "clone m6 s6 meta promotable=true"
+.INP: primitive d7 Dummy params rule inf: #uname eq node1 fake=1 params rule inf: #uname eq node2 fake=2 op start interval=0 timeout=60s op_params 2: rule #uname eq node1 op_param=dummy op_params 1: op_param=smart op_meta 2: rule #ra-version version:gt 1.0 start-delay=120m op_meta 1: start-delay=60m
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: primitive d8 ocf:pacemaker:Dummy
+.INP: clone m7 d8 meta promotable=true meta promoted-max=1 meta promoted-node-max=1
+.INP: location l1 g1 100: node1
+.INP: location l2 c rule $id=l2-rule1 100: #uname eq node1
+.INP: location l3 m5 rule inf: #uname eq node1 and pingd gt 0
+.INP: location l4 m5 rule -inf: not_defined pingd or pingd lte 0
+.INP: location l5 m5 rule -inf: not_defined pingd or pingd lte 0 rule inf: #uname eq node1 and pingd gt 0 rule inf: date lt "2009-05-26" and date in start="2009-05-26" end="2009-07-26" and date in start="2009-05-26" years="2009" and date spec years="2009" hours="09-17"
+.INP: location l6 m5 rule $id-ref=l2-rule1
+.INP: location l7 m5 rule $id-ref=l2
+.INP: collocation c1 inf: m6 m5
+WARNING: 36: This command 'collocation' is deprecated, please use 'colocation'
+INFO: 36: "collocation" is accepted as "colocation"
+.INP: collocation c2 inf: m5:Master d1:Started
+WARNING: 37: This command 'collocation' is deprecated, please use 'colocation'
+INFO: 37: "collocation" is accepted as "colocation"
+.INP: order o1 Mandatory: m5 m6
+.INP: order o2 Optional: d1:start m5:promote
+.INP: order o3 Serialize: m5 m6
+.INP: order o4 Mandatory: m5 m6
+.INP: rsc_ticket ticket-A_m6 ticket-A: m6
+.INP: rsc_ticket ticket-B_m6_m5 ticket-B: m6 m5 loss-policy=fence
+.INP: rsc_ticket ticket-C_master ticket-C: m6 m5:Master loss-policy=fence
+.INP: fencing_topology st st2
+.INP: property stonith-enabled=true
+.INP: property $id=cpset2 maintenance-mode=true
+.INP: rsc_defaults failure-timeout=10m
+.INP: op_defaults $id=opsdef2 rule 100: #uname eq node1 record-pending=true
+.INP: tag t1: m5 m6
+.INP: set d2.mondelay 45
+.INP: _test
+.INP: verify
+WARNING: 53: c2: resource d1 is grouped, constraints should apply to the group
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.INP: show
+node node1 \
+ attributes mem=16G
+node node2 \
+ utilization cpu=4
+primitive d1 ocf:pacemaker:Dummy \
+ operations $id=d1-ops \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s \
+ op monitor interval=60s timeout=30s
+primitive d2 Delay \
+ params mondelay=45 \
+ op start timeout=60s interval=0s \
+ op stop timeout=60s interval=0s \
+ op monitor timeout=30s interval=10s \
+ op monitor role=Started interval=60s timeout=30s
+primitive d3 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d4 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d7 Dummy \
+ params rule #uname eq node1 fake=1 \
+ params rule #uname eq node2 fake=2 \
+ op start interval=0 timeout=60s \
+ op_params 2: rule #uname eq node1 op_param=dummy \
+ op_params 1: op_param=smart \
+ op_meta 2: rule #ra-version version:gt 1.0 start-delay=120m \
+ op_meta 1: start-delay=60m \
+ op monitor timeout=20s interval=10s \
+ op stop timeout=20s interval=0s
+primitive d8 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive s5 ocf:pacemaker:Stateful \
+ operations $id-ref=d1-ops \
+ op monitor timeout=20s interval=10s role=Promoted \
+ op monitor timeout=20s interval=11s role=Unpromoted \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s \
+ op promote timeout=10s interval=0s \
+ op demote timeout=10s interval=0s
+primitive s6 ocf:pacemaker:Stateful \
+ operations $id-ref=d1-ops \
+ op monitor timeout=20s interval=10s role=Promoted \
+ op monitor timeout=20s interval=11s role=Unpromoted \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s \
+ op promote timeout=10s interval=0s \
+ op demote timeout=10s interval=0s
+primitive st stonith:ssh \
+ params hostlist="node1 node2" \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+primitive st2 stonith:ssh \
+ params hostlist="node1 node2" \
+ op monitor timeout=20 interval=3600 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group g1 d1 d2
+ms m d4
+ms m5 s5
+ms m6 s6
+clone c d3 \
+ meta clone-max=1 interleave=true
+clone m7 d8 \
+ meta promotable=true interleave=true \
+ meta promoted-max=1 \
+ meta promoted-node-max=1
+tag t1 m5 m6
+colocation c1 inf: m6 m5
+colocation c2 inf: m5:Master d1:Started
+location l1 g1 100: node1
+location l2 c \
+ rule $id=l2-rule1 100: #uname eq node1
+location l3 m5 \
+ rule #uname eq node1 and pingd gt 0
+location l4 m5 \
+ rule -inf: not_defined pingd or pingd lte 0
+location l5 m5 \
+ rule -inf: not_defined pingd or pingd lte 0 \
+ rule #uname eq node1 and pingd gt 0 \
+ rule date lt 2009-05-26 and date in start=2009-05-26 end=2009-07-26 and date in start=2009-05-26 years=2009 and date spec years=2009 hours=09-17
+location l6 m5 \
+ rule $id-ref=l2-rule1
+location l7 m5 \
+ rule $id-ref=l2-rule1
+order o1 Mandatory: m5 m6
+order o2 Optional: d1:start m5:promote
+order o3 Serialize: m5 m6
+order o4 Mandatory: m5 m6
+fencing_topology st st2
+rsc_ticket ticket-A_m6 ticket-A: m6
+rsc_ticket ticket-B_m6_m5 ticket-B: m6 m5 loss-policy=fence
+rsc_ticket ticket-C_master ticket-C: m6 m5:Master loss-policy=fence
+property cib-bootstrap-options: \
+ stonith-enabled=true
+property cpset2: \
+ maintenance-mode=true
+rsc_defaults rsc-options: \
+ failure-timeout=10m
+op_defaults opsdef2: \
+ rule 100: #uname eq node1 \
+ record-pending=true
+.INP: commit
+WARNING: 55: c2: resource d1 is grouped, constraints should apply to the group
+.TRY -F node maintenance node1
+.TRY -F resource maintenance g1 off
+.TRY -F resource maintenance d1
+.TRY -F configure property maintenance-mode=true
+INFO: 'maintenance' attribute already exists in d1. Remove it? [YES]
+INFO: 'maintenance' attribute already exists in g1. Remove it? [YES]
+INFO: 'maintenance' attribute already exists in node1. Remove it? [YES]
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
diff --git a/test/testcases/delete b/test/testcases/delete
new file mode 100644
index 0000000..7d0dc57
--- /dev/null
+++ b/test/testcases/delete
@@ -0,0 +1,64 @@
+session Delete/Rename test
+configure
+# erase to start from scratch
+erase
+erase nodes
+node node1
+# create one stonith so that verify does not complain
+primitive st stonith:ssh \
+ params hostlist='node1' \
+ meta target-role="Started" \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive d1 ocf:pacemaker:Dummy
+primitive d2 ocf:pacemaker:Dummy
+location d1-pref d1 100: node1
+show
+_test
+rename d1 p1
+show
+# delete primitive
+delete d2
+_test
+show
+# delete primitive with constraint
+delete p1
+_test
+show
+primitive d1 ocf:pacemaker:Dummy
+location d1-pref d1 100: node1
+_test
+# delete primitive belonging to a group
+primitive d2 ocf:pacemaker:Dummy
+_test
+group g1 d2 d1
+delete d2
+show
+_test
+delete g1
+show
+verify
+# delete a group which is in a clone
+primitive d2 ocf:pacemaker:Dummy
+group g1 d2 d1
+clone c1 g1
+delete g1
+show
+_test
+group g1 d2 d1
+clone c1 g1
+_test
+# delete group from a clone (again)
+delete g1
+show
+_test
+group g1 d2 d1
+clone c1 g1
+# delete primitive and its group and their clone
+delete d2 d1 c1 g1
+show
+_test
+# verify
+verify
+commit
+.
diff --git a/test/testcases/delete.exp b/test/testcases/delete.exp
new file mode 100644
index 0000000..87b1a7a
--- /dev/null
+++ b/test/testcases/delete.exp
@@ -0,0 +1,194 @@
+.TRY Delete/Rename test
+.INP: configure
+.INP: # erase to start from scratch
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: # create one stonith so that verify does not complain
+.INP: primitive st stonith:ssh params hostlist='node1' meta target-role="Started" op start timeout=60s op monitor interval=60m timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive d1 ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.INP: primitive d2 ocf:pacemaker:Dummy
+.INP: location d1-pref d1 100: node1
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref d1 100: node1
+.INP: _test
+.INP: rename d1 p1
+INFO: 13: modified location:d1-pref from d1 to p1
+.INP: show
+node node1
+primitive d2 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref p1 100: node1
+.INP: # delete primitive
+.INP: delete d2
+.INP: _test
+.INP: show
+node node1
+primitive p1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref p1 100: node1
+.INP: # delete primitive with constraint
+.INP: delete p1
+INFO: 20: hanging location:d1-pref deleted
+.INP: _test
+.INP: show
+node node1
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+.INP: primitive d1 ocf:pacemaker:Dummy
+.INP: location d1-pref d1 100: node1
+.INP: _test
+.INP: # delete primitive belonging to a group
+.INP: primitive d2 ocf:pacemaker:Dummy
+.INP: _test
+.INP: group g1 d2 d1
+INFO: 29: modified location:d1-pref from d1 to g1
+.INP: delete d2
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+group g1 d1
+location d1-pref g1 100: node1
+.INP: _test
+.INP: delete g1
+INFO: 33: modified location:d1-pref from g1 to d1
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref d1 100: node1
+.INP: verify
+.INP: # delete a group which is in a clone
+.INP: primitive d2 ocf:pacemaker:Dummy
+.INP: group g1 d2 d1
+INFO: 38: modified location:d1-pref from d1 to g1
+.INP: clone c1 g1
+INFO: 39: modified location:d1-pref from g1 to c1
+.INP: delete g1
+INFO: 40: modified location:d1-pref from c1 to g1
+INFO: 40: modified location:d1-pref from g1 to d2
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref d2 100: node1
+.INP: _test
+.INP: group g1 d2 d1
+INFO: 43: modified location:d1-pref from d2 to g1
+.INP: clone c1 g1
+INFO: 44: modified location:d1-pref from g1 to c1
+.INP: _test
+.INP: # delete group from a clone (again)
+.INP: delete g1
+INFO: 47: modified location:d1-pref from c1 to g1
+INFO: 47: modified location:d1-pref from g1 to d2
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+location d1-pref d2 100: node1
+.INP: _test
+.INP: group g1 d2 d1
+INFO: 50: modified location:d1-pref from d2 to g1
+.INP: clone c1 g1
+INFO: 51: modified location:d1-pref from g1 to c1
+.INP: # delete primitive and its group and their clone
+.INP: delete d2 d1 c1 g1
+INFO: 53: modified location:d1-pref from c1 to g1
+INFO: 53: modified location:d1-pref from g1 to d2
+INFO: 53: hanging location:d1-pref deleted
+.INP: show
+node node1
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+.INP: _test
+.INP: # verify
+.INP: verify
+.INP: commit
diff --git a/test/testcases/edit b/test/testcases/edit
new file mode 100644
index 0000000..7deb115
--- /dev/null
+++ b/test/testcases/edit
@@ -0,0 +1,95 @@
+show Configuration editing
+op_defaults timeout=2m
+node node1 \
+ attributes mem=16G
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m
+primitive p1 ocf:heartbeat:Dummy \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10
+filter "sed '$aprimitive p2 ocf:heartbeat:Dummy'"
+filter "sed '$agroup g1 p1 p2'"
+show
+filter "sed 's/p2/p3/;$aprimitive p3 ocf:heartbeat:Dummy'" g1
+show
+filter "sed '$aclone c1 p2'"
+filter "sed 's/p2/g1/'" c1
+filter "sed '/clone/s/g1/p2/'" c1 g1
+filter "sed '/clone/s/p2/g1/;s/p3/p2/'" c1 g1
+filter "sed '1,$d'" c1 g1
+filter "sed -e '$aclone c1 g1' -e '$agroup g1 p1 p2'"
+location l1 p3 100: node1
+order o1 Mandatory: p3 c1
+colocation cl1 inf: c1 p3
+filter "sed '/cl1/s/p3/p2/'"
+filter "sed '/cl1/d'"
+primitive d1 ocf:heartbeat:Dummy
+primitive d2 ocf:heartbeat:Dummy
+primitive d3 ocf:heartbeat:Dummy
+group g2 d1 d2
+filter "sed '/g2/s/d1/p1/;/g1/s/p1/d1/'"
+filter "sed '/g1/s/d1/p1/;/g2/s/p1/d1/'"
+filter "sed '$alocation loc-d1 d1 rule $id=r1 -inf: not_defined webserver rule $id=r2 webserver: defined webserver'"
+filter "sed 's/not_defined webserver/& or mem number:lte 0/'" loc-d1
+filter "sed 's/ or mem number:lte 0//'" loc-d1
+filter "sed 's/not_defined webserver/& rule -inf: not_defined a2/'" loc-d1
+filter "sed 's/not_defined webserver/& or mem number:lte 0/'" loc-d1
+modgroup g1 add d3
+modgroup g1 remove p1
+modgroup g1 add p1 after p2
+modgroup g1 remove p1
+modgroup g1 add p1 before p2
+modgroup g1 add p1
+modgroup g1 remove st
+modgroup g1 remove c1
+modgroup g1 remove nosuch
+modgroup g1 add c1
+modgroup g1 add nosuch
+filter "sed 's/^/# this is a comment\\n/'" loc-d1
+rsc_defaults $id="rsc_options" failure-timeout=10m
+filter "sed 's/2m/60s/'" op-options
+show op-options
+property stonith-enabled=true
+show cib-bootstrap-options
+filter 'sed "s/stonith-enabled=true//"'
+show cib-bootstrap-options
+primitive d4 ocf:heartbeat:Dummy
+primitive d5 ocf:heartbeat:Dummy
+primitive d6 ocf:heartbeat:Dummy
+order o-d456 d4 d5 d6
+tag t-d45: d4 d5
+show type:order
+show related:d4
+show
+commit
+_test
+verify
+primitive a0 ocf:heartbeat:Dummy
+primitive a1 ocf:heartbeat:Dummy
+primitive a2 ocf:heartbeat:Dummy
+primitive a3 ocf:heartbeat:Dummy
+primitive a4 ocf:heartbeat:Dummy
+primitive a5 ocf:heartbeat:Dummy
+primitive a6 ocf:heartbeat:Dummy
+primitive a7 ocf:heartbeat:Dummy
+primitive a8 ocf:heartbeat:Dummy
+primitive a9 ocf:heartbeat:Dummy
+primitive aErr ocf:heartbeat:Dummy
+group as a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aErr
+commit
+cd ..
+cd configure
+filter "sed '/as/s/a9//'"
+filter "sed '/as/s/a1/a1 a9/'"
+commit
+cd ..
+cd configure
+filter "sed '/abs/s/a9//'"
+filter "sed '/abs/s/a8/a8 a9/'"
+show
+commit
+_test
+verify
+.
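
The edit testcase leans on `filter`, which, judging by the traces in edit.exp below, pipes the current configuration text through an arbitrary shell command and loads the result back, so each `sed` program acts as a scripted interactive edit. A conceptual sketch of that round trip (an assumption about the observable behaviour, not the actual crmsh implementation):

    import subprocess

    def filter_config(config_text: str, command: str) -> str:
        """Pipe the rendered configuration through a shell command and
        return the edited text, as `configure filter` appears to do."""
        proc = subprocess.run(command, shell=True, input=config_text,
                              capture_output=True, text=True, check=True)
        return proc.stdout

    cfg = "primitive p1 ocf:heartbeat:Dummy\n"
    cfg = filter_config(cfg, "sed '$aprimitive p2 ocf:heartbeat:Dummy'")
    print(cfg)  # now contains both p1 and p2
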
diff --git a/test/testcases/edit.excl b/test/testcases/edit.excl
new file mode 100644
index 0000000..3589a25
--- /dev/null
+++ b/test/testcases/edit.excl
@@ -0,0 +1 @@
+^\.EXT sed \-[re] ['][^']
diff --git a/test/testcases/edit.exp b/test/testcases/edit.exp
new file mode 100644
index 0000000..3d3bc0b
--- /dev/null
+++ b/test/testcases/edit.exp
@@ -0,0 +1,437 @@
+.TRY Configuration editing
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: op_defaults timeout=2m
+.INP: node node1 attributes mem=16G
+.INP: primitive st stonith:null params hostlist='node1' meta description="some description here" requires=nothing op monitor interval=60m
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.INP: primitive p1 ocf:heartbeat:Dummy op monitor interval=60m op monitor interval=120m OCF_CHECK_LEVEL=10
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: filter "sed '$aprimitive p2 ocf:heartbeat:Dummy'"
+.INP: filter "sed '$agroup g1 p1 p2'"
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group g1 p1 p2
+op_defaults op-options: \
+ timeout=2m
+.INP: filter "sed 's/p2/p3/;$aprimitive p3 ocf:heartbeat:Dummy'" g1
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy
+primitive p3 Dummy
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group g1 p1 p3
+op_defaults op-options: \
+ timeout=2m
+.INP: filter "sed '$aclone c1 p2'"
+.INP: filter "sed 's/p2/g1/'" c1
+.INP: filter "sed '/clone/s/g1/p2/'" c1 g1
+.INP: filter "sed '/clone/s/p2/g1/;s/p3/p2/'" c1 g1
+.INP: filter "sed '1,$d'" c1 g1
+.INP: filter "sed -e '$aclone c1 g1' -e '$agroup g1 p1 p2'"
+.INP: location l1 p3 100: node1
+.INP: order o1 Mandatory: p3 c1
+.INP: colocation cl1 inf: c1 p3
+.INP: filter "sed '/cl1/s/p3/p2/'"
+.INP: filter "sed '/cl1/d'"
+.INP: primitive d1 ocf:heartbeat:Dummy
+.INP: primitive d2 ocf:heartbeat:Dummy
+.INP: primitive d3 ocf:heartbeat:Dummy
+.INP: group g2 d1 d2
+.INP: filter "sed '/g2/s/d1/p1/;/g1/s/p1/d1/'"
+ERROR: 29: Cannot create group:g1: Child primitive:d1 already in group:g2
+.INP: filter "sed '/g1/s/d1/p1/;/g2/s/p1/d1/'"
+.INP: filter "sed '$alocation loc-d1 d1 rule $id=r1 -inf: not_defined webserver rule $id=r2 webserver: defined webserver'"
+.INP: filter "sed 's/not_defined webserver/& or mem number:lte 0/'" loc-d1
+.INP: filter "sed 's/ or mem number:lte 0//'" loc-d1
+.INP: filter "sed 's/not_defined webserver/& rule -inf: not_defined a2/'" loc-d1
+.INP: filter "sed 's/not_defined webserver/& or mem number:lte 0/'" loc-d1
+.INP: modgroup g1 add d3
+.INP: modgroup g1 remove p1
+.INP: modgroup g1 add p1 after p2
+.INP: modgroup g1 remove p1
+.INP: modgroup g1 add p1 before p2
+.INP: modgroup g1 add p1
+ERROR: 1: syntax in group: child p1 listed more than once in group g1 parsing 'group g1 p1 p2 d3 p1'
+.INP: modgroup g1 remove st
+ERROR: 42: configure.modgroup: st is not member of g1
+Traceback (most recent call last):
+ rv = self.execute_command() is not False
+ rv = self.command_info.function(*arglist)
+ context.fatal_error("%s is not member of %s" % (prim_id, group_id))
+ raise ValueError(msg)
+ValueError: st is not member of g1
+.INP: modgroup g1 remove c1
+ERROR: 43: configure.modgroup: c1 is not member of g1
+Traceback (most recent call last):
+ rv = self.execute_command() is not False
+ rv = self.command_info.function(*arglist)
+ context.fatal_error("%s is not member of %s" % (prim_id, group_id))
+ raise ValueError(msg)
+ValueError: c1 is not member of g1
+.INP: modgroup g1 remove nosuch
+ERROR: 44: configure.modgroup: nosuch is not member of g1
+Traceback (most recent call last):
+ rv = self.execute_command() is not False
+ rv = self.command_info.function(*arglist)
+ context.fatal_error("%s is not member of %s" % (prim_id, group_id))
+ raise ValueError(msg)
+ValueError: nosuch is not member of g1
+.INP: modgroup g1 add c1
+ERROR: 45: a group may contain only primitives; c1 is clone
+.INP: modgroup g1 add nosuch
+ERROR: 46: g1 refers to missing object nosuch
+.INP: filter "sed 's/^/# this is a comment\n/'" loc-d1
+.INP: rsc_defaults $id="rsc_options" failure-timeout=10m
+.INP: filter "sed 's/2m/60s/'" op-options
+.INP: show op-options
+op_defaults op-options: \
+ timeout=60s
+.INP: property stonith-enabled=true
+.INP: show cib-bootstrap-options
+property cib-bootstrap-options: \
+ stonith-enabled=true
+.INP: filter 'sed "s/stonith-enabled=true//"'
+.INP: show cib-bootstrap-options
+property cib-bootstrap-options:
+.INP: primitive d4 ocf:heartbeat:Dummy
+.INP: primitive d5 ocf:heartbeat:Dummy
+.INP: primitive d6 ocf:heartbeat:Dummy
+.INP: order o-d456 d4 d5 d6
+.INP: tag t-d45: d4 d5
+.INP: show type:order
+order o-d456 d4 d5 d6
+order o1 Mandatory: p3 c1
+.INP: show related:d4
+primitive d4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+tag t-d45 d4 d5
+order o-d456 d4 d5 d6
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive d1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy
+primitive p3 Dummy
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group g1 p1 p2 d3
+group g2 d1 d2
+clone c1 g1
+tag t-d45 d4 d5
+location l1 p3 100: node1
+location loc-d1 d1 \
+ rule -inf: not_defined webserver or mem number:lte 0 \
+ rule -inf: not_defined a2 \
+ rule webserver: defined webserver
+order o-d456 d4 d5 d6
+order o1 Mandatory: p3 c1
+property cib-bootstrap-options:
+rsc_defaults rsc_options: \
+ failure-timeout=10m
+op_defaults op-options: \
+ timeout=60s
+.INP: commit
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.INP: _test
+.INP: verify
+.INP: primitive a0 ocf:heartbeat:Dummy
+.INP: primitive a1 ocf:heartbeat:Dummy
+.INP: primitive a2 ocf:heartbeat:Dummy
+.INP: primitive a3 ocf:heartbeat:Dummy
+.INP: primitive a4 ocf:heartbeat:Dummy
+.INP: primitive a5 ocf:heartbeat:Dummy
+.INP: primitive a6 ocf:heartbeat:Dummy
+.INP: primitive a7 ocf:heartbeat:Dummy
+.INP: primitive a8 ocf:heartbeat:Dummy
+.INP: primitive a9 ocf:heartbeat:Dummy
+.INP: primitive aErr ocf:heartbeat:Dummy
+.INP: group as a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aErr
+.INP: commit
+.INP: cd ..
+.INP: cd configure
+.INP: filter "sed '/as/s/a9//'"
+.INP: filter "sed '/as/s/a1/a1 a9/'"
+.INP: commit
+.INP: cd ..
+.INP: cd configure
+.INP: filter "sed '/abs/s/a9//'"
+.INP: filter "sed '/abs/s/a8/a8 a9/'"
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive a0 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a7 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a8 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a9 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive aErr Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy
+primitive p3 Dummy
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group as a0 a1 a9 a2 a3 a4 a5 a6 a7 a8 aErr
+group g1 p1 p2 d3
+group g2 d1 d2
+clone c1 g1
+tag t-d45 d4 d5
+location l1 p3 100: node1
+location loc-d1 d1 \
+ rule -inf: not_defined webserver or mem number:lte 0 \
+ rule -inf: not_defined a2 \
+ rule webserver: defined webserver
+order o-d456 d4 d5 d6
+order o1 Mandatory: p3 c1
+property cib-bootstrap-options:
+rsc_defaults rsc_options: \
+ failure-timeout=10m
+op_defaults op-options: \
+ timeout=60s
+.INP: commit
+INFO: 89: apparently there is nothing to commit
+INFO: 89: try changing something first
+.INP: _test
+.INP: verify
+.INP: show
+node node1 \
+ attributes mem=16G
+primitive a0 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a7 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a8 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive a9 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive aErr Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d2 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d4 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d6 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ op monitor interval=60m timeout=20s \
+ op monitor interval=120m timeout=20s \
+ op_params OCF_CHECK_LEVEL=10 \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy
+primitive p3 Dummy
+primitive st stonith:null \
+ params hostlist=node1 \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m timeout=20 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+group as a0 a1 a9 a2 a3 a4 a5 a6 a7 a8 aErr
+group g1 p1 p2 d3
+group g2 d1 d2
+clone c1 g1
+tag t-d45 d4 d5
+location l1 p3 100: node1
+location loc-d1 d1 \
+ rule -inf: not_defined webserver or mem number:lte 0 \
+ rule -inf: not_defined a2 \
+ rule webserver: defined webserver
+order o-d456 d4 d5 d6
+order o1 Mandatory: p3 c1
+property cib-bootstrap-options:
+rsc_defaults rsc_options: \
+ failure-timeout=10m
+op_defaults op-options: \
+ timeout=60s
+.INP: commit
+INFO: 93: apparently there is nothing to commit
+INFO: 93: try changing something first
diff --git a/test/testcases/file b/test/testcases/file
new file mode 100644
index 0000000..5f215b7
--- /dev/null
+++ b/test/testcases/file
@@ -0,0 +1,14 @@
+configure save sample.txt
+%ext cat sample.txt
+configure erase nodes
+configure load replace sample.txt
+%ext sed -i 's/60s/2m/' sample.txt
+%ext sed -i '8a # comment' sample.txt
+session Load update
+configure
+delete m1 p1
+property cluster-recheck-interval="10m"
+load update sample.txt
+.
+configure show
+%ext rm sample.txt
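
This testcase exercises the save/load cycle: `configure save` dumps the CLI text to a file, `%ext` runs a command outside crmsh (echoed as `.EXT` in the trace below), and `load update` merges the edited file back rather than replacing the configuration wholesale. A sketch of the `%ext` dispatch as inferred from the fixtures (`feed_to_crm` is a hypothetical stand-in):

    import subprocess

    def feed_to_crm(line: str):
        # Hypothetical stand-in for handing the line to the crm shell.
        print("crm <<", line)

    def run_line(line: str):
        # Inferred from the fixtures: a "%ext " prefix runs a shell command
        # outside crmsh and is echoed as ".EXT" in the resulting trace.
        if line.startswith("%ext "):
            subprocess.run(line[len("%ext "):], shell=True, check=True)
        else:
            feed_to_crm(line)

    run_line("%ext rm -f sample.txt")
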
diff --git a/test/testcases/file.exp b/test/testcases/file.exp
new file mode 100644
index 0000000..dce48de
--- /dev/null
+++ b/test/testcases/file.exp
@@ -0,0 +1,77 @@
+.TRY configure save sample.txt
+.EXT cat sample.txt
+node node1
+primitive p0 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Delay \
+ params startdelay=2 mondelay=2 stopdelay=2 \
+ op monitor timeout=30s interval=10s \
+ op start timeout=30s interval=0s \
+ op stop timeout=30s interval=0s
+primitive p3 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:null \
+ params hostlist=node1 \
+ op monitor timeout=20 interval=3600 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+clone c1 p1 \
+ meta interleave=true
+clone m1 p2 \
+ meta promotable=true interleave=true
+rsc_defaults build-resource-defaults: \
+ resource-stickiness=1
+op_defaults op-options: \
+ timeout=60s
+.TRY configure erase nodes
+.TRY configure load replace sample.txt
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.EXT sed -i 's/60s/2m/' sample.txt
+.EXT sed -i '8a # comment' sample.txt
+.TRY Load update
+.INP: configure
+.INP: delete m1 p1
+.INP: property cluster-recheck-interval="10m"
+.INP: load update sample.txt
+ERROR: 4: syntax: Unknown command near <op> parsing 'op stop timeout=20s interval=0s'
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.TRY configure show
+node node1
+primitive p0 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Delay \
+ params startdelay=2 mondelay=2 stopdelay=2 \
+ op monitor timeout=30s interval=10s \
+ op start timeout=30s interval=0s \
+ op stop timeout=30s interval=0s
+primitive p3 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:null \
+ params hostlist=node1 \
+ op monitor timeout=20 interval=3600 \
+ op start timeout=20 interval=0s \
+ op stop timeout=15 interval=0s
+property cib-bootstrap-options: \
+ cluster-recheck-interval=10m
+rsc_defaults build-resource-defaults: \
+ resource-stickiness=1
+op_defaults op-options: \
+ timeout=60s
+.EXT rm sample.txt
diff --git a/test/testcases/history b/test/testcases/history
new file mode 100644
index 0000000..383fca8
--- /dev/null
+++ b/test/testcases/history
@@ -0,0 +1,42 @@
+session History
+history
+source history-test.tar.bz2
+info
+events
+node 15sp1-1
+node 15sp1-2
+node .*
+exclude pcmk_peer_update
+exclude
+node 15sp1-2
+exclude clear
+exclude corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd
+exclude clear
+peinputs
+peinputs v
+transitions
+refresh
+resource d1
+# reduce report span
+timeframe "2019-03-22 15:07:37"
+peinputs
+resource d1
+exclude corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd
+transition log
+transition nograph
+transition -1 nograph
+transition save 0 _crmsh_regtest
+transition log 49
+transition tags 49
+# reset timeframe
+timeframe
+session save _crmsh_regtest
+session load _crmsh_regtest
+session
+session pack
+.
+session History 2
+history
+session load _crmsh_regtest
+exclude
+.
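
The history session drives crmsh's log forensics against a canned crm_report archive. The trace in history.exp further down shows the first things `source` must do: list the tarball to find the top-level report directory, then unpack it. In Python terms, roughly (an assumption mirroring the `tar -tj | head -1` and `tar -xj` calls recorded in the trace):

    import tarfile

    def open_report(path: str) -> str:
        """Unpack a crm_report tarball and return its top-level directory."""
        with tarfile.open(path, "r:bz2") as tar:
            top = tar.getnames()[0].split("/")[0]
            tar.extractall(path=".")
        return top

    # print(open_report("history-test.tar.bz2"))  # -> the report directory
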
diff --git a/test/testcases/history.excl b/test/testcases/history.excl
new file mode 100644
index 0000000..01f788c
--- /dev/null
+++ b/test/testcases/history.excl
@@ -0,0 +1,3 @@
+^ptest.*:
+^\.EXT tar -C ['][^']+['] -cj -f ['][^']+['] _crmsh_regtest
+^Report saved in ['][^']+
diff --git a/test/testcases/history.exp b/test/testcases/history.exp
new file mode 100644
index 0000000..55cb2c8
--- /dev/null
+++ b/test/testcases/history.exp
@@ -0,0 +1,600 @@
+.TRY History
+.INP: history
+.INP: source history-test.tar.bz2
+.INP: info
+.EXT tar -tj < history-test.tar.bz2 2> /dev/null | head -1
+.EXT tar -xj < history-test.tar.bz2
+Source: history-test.tar.bz2
+Created on: Fri Mar 22 15:08:40 CST 2019
+By: report
+Period: 2019-03-19 01:09:49 - 2019-03-22 23:08:36
+Nodes: 15sp1-1 15sp1-2
+Groups: g1
+Clones:
+Resources: stonith-sbd d1 d2
+Transitions: ... 37* 38* 39* 40* 41 42* 43 44* 45 46 0 48 49* 11 12 13* 15* 16 18 19*
+.INP: events
+2019-03-22T10:56:18.986113+08:00 15sp1-2 mysql(mysql)[2185]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T10:56:18.586826+08:00 15sp1-1 mysql(mysql)[4459]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T10:56:19.028197+08:00 15sp1-2 mysql(mysql)[2224]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T10:56:19.082101+08:00 15sp1-2 mysql(mysql)[2259]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T10:56:19.026652+08:00 15sp1-1 pacemaker-schedulerd[1739]: notice: * Recover mysql ( 15sp1-2 -> 15sp1-1 )
+2019-03-22T10:56:19.292370+08:00 15sp1-1 mysql(mysql)[4498]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T10:56:19.646138+08:00 15sp1-1 mysql(mysql)[4533]: ERROR: Setup problem: couldn't find command: /usr/bin/safe_mysqld
+2019-03-22T11:02:21.651185+08:00 15sp1-1 pacemakerd[1742]: warning: pacemaker-controld[1749] terminated with signal 9 (core=0)
+2019-03-22T11:45:15.291388+08:00 15sp1-1 pacemaker-controld[1813]: error: Cannot route message to unknown node node1
+2019-03-22T11:46:15.982330+08:00 15sp1-1 pacemaker-controld[1813]: error: Cannot route message to unknown node node1
+2019-03-22T14:46:29.149904+08:00 15sp1-1 sshd[11637]: error: PAM: Authentication failure for root from 10.67.19.6
+2019-03-22T10:34:47.224345+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:47.602896+08:00 15sp1-1 corosync[1656]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:48.151543+08:00 15sp1-1 corosync[1638]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:48.153049+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:34:56.014052+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:56.294554+08:00 15sp1-2 corosync[1660]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:56.851006+08:00 15sp1-2 corosync[1639]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:56.854249+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:36:11.038712+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:36:11.045811+08:00 15sp1-1 corosync[2731]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:36:11.314757+08:00 15sp1-1 corosync[1681]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:36:12.057745+08:00 15sp1-1 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:37:14.300560+08:00 15sp1-2 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:37:14.308907+08:00 15sp1-2 corosync[2762]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:37:14.526591+08:00 15sp1-2 corosync[1674]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:37:15.321200+08:00 15sp1-2 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:39:25.658818+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:39:25.747937+08:00 15sp1-1 corosync[2907]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:39:26.283560+08:00 15sp1-1 corosync[2895]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:39:26.284460+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:40:35.393413+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:40:35.482538+08:00 15sp1-2 corosync[2951]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:40:36.016624+08:00 15sp1-2 corosync[2939]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:40:36.017584+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:20.133830+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:20.417231+08:00 15sp1-1 corosync[1596]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:20.966749+08:00 15sp1-1 corosync[1570]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:20.967453+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:30.288757+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:30.455372+08:00 15sp1-2 corosync[1594]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:31.022153+08:00 15sp1-2 corosync[1569]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:31.022858+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.419697+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Scheduling Node 15sp1-2 for STONITH
+2019-03-22T10:57:47.129510+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:57:47.345204+08:00 15sp1-2 corosync[1605]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:57:47.940808+08:00 15sp1-2 corosync[1578]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:57:47.941515+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T11:00:43.905492+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T11:01:22.108074+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T11:01:22.240699+08:00 15sp1-1 corosync[1604]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T11:01:22.822920+08:00 15sp1-1 corosync[1581]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T11:01:22.823827+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:35:10.376892+08:00 15sp1-2 pacemaker-controld[1750]: notice: Updating quorum status to true (call=26)
+2019-03-22T10:39:50.964158+08:00 15sp1-1 pacemaker-controld[2921]: notice: Updating quorum status to true (call=26)
+2019-03-22T10:40:41.791107+08:00 15sp1-1 pacemaker-controld[2921]: notice: Updating quorum status to true (call=53)
+2019-03-22T10:41:15.144867+08:00 15sp1-2 pacemaker-controld[2965]: notice: Updating quorum status to true (call=31)
+2019-03-22T10:42:43.668990+08:00 15sp1-1 pacemaker-controld[1740]: notice: Updating quorum status to true (call=26)
+2019-03-22T10:57:27.930481+08:00 15sp1-1 pacemaker-controld[1740]: notice: Peer 15sp1-2 was terminated (reboot) by 15sp1-1 on behalf of pacemaker-controld.1740: OK
+2019-03-22T10:57:52.410569+08:00 15sp1-1 pacemaker-controld[1740]: notice: Updating quorum status to true (call=175)
+2019-03-22T11:00:43.930597+08:00 15sp1-2 pacemaker-controld[1748]: notice: Updating quorum status to true (call=34)
+2019-03-22T11:01:29.688725+08:00 15sp1-2 pacemaker-controld[1748]: notice: Updating quorum status to true (call=62)
+2019-03-22T11:02:23.786295+08:00 15sp1-2 pacemaker-controld[1748]: notice: Updating quorum status to true (call=85)
+2019-03-22T10:39:55.137238+08:00 15sp1-1 pacemaker-controld[2921]: notice: Initiating start operation stonith-sbd_start_0 locally on 15sp1-1
+2019-03-22T10:39:55.137767+08:00 15sp1-1 pacemaker-execd[2918]: notice: executing - rsc:stonith-sbd action:start call_id:6
+2019-03-22T10:39:57.604345+08:00 15sp1-1 pacemaker-execd[2918]: notice: finished - rsc:stonith-sbd action:start call_id:6 exit-code:0 exec-time:2467ms queue-time:0ms
+2019-03-22T10:41:13.905506+08:00 15sp1-2 pacemaker-execd[2962]: notice: executing - rsc:stonith-sbd action:start call_id:6
+2019-03-22T10:41:13.913809+08:00 15sp1-1 pacemaker-controld[2921]: notice: Initiating stop operation stonith-sbd_stop_0 locally on 15sp1-1
+2019-03-22T10:41:13.913941+08:00 15sp1-1 pacemaker-execd[2918]: notice: executing - rsc:stonith-sbd action:stop call_id:7
+2019-03-22T10:41:13.914056+08:00 15sp1-1 pacemaker-execd[2918]: notice: finished - rsc:stonith-sbd action:stop call_id:7 exit-code:0 exec-time:0ms queue-time:0ms
+2019-03-22T10:41:13.914284+08:00 15sp1-1 pacemaker-controld[2921]: notice: Initiating start operation stonith-sbd_start_0 on 15sp1-2
+2019-03-22T10:41:15.074728+08:00 15sp1-2 pacemaker-execd[2962]: notice: finished - rsc:stonith-sbd action:start call_id:6 exit-code:0 exec-time:1170ms queue-time:0ms
+2019-03-22T10:41:16.497053+08:00 15sp1-2 pacemaker-controld[2965]: notice: Initiating stop operation stonith-sbd_stop_0 locally on 15sp1-2
+2019-03-22T10:41:16.497127+08:00 15sp1-2 pacemaker-execd[2962]: notice: executing - rsc:stonith-sbd action:stop call_id:7
+2019-03-22T10:41:16.497217+08:00 15sp1-2 pacemaker-execd[2962]: notice: finished - rsc:stonith-sbd action:stop call_id:7 exit-code:0 exec-time:2ms queue-time:0ms
+2019-03-22T10:42:44.878768+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation stonith-sbd_start_0 locally on 15sp1-1
+2019-03-22T10:42:44.880933+08:00 15sp1-1 pacemaker-execd[1737]: notice: executing - rsc:stonith-sbd action:start call_id:6
+2019-03-22T10:42:46.405487+08:00 15sp1-1 pacemaker-execd[1737]: notice: finished - rsc:stonith-sbd action:start call_id:6 exit-code:0 exec-time:1524ms queue-time:0ms
+2019-03-22T10:43:08.620641+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation stonith-sbd_stop_0 locally on 15sp1-1
+2019-03-22T10:43:08.620831+08:00 15sp1-1 pacemaker-execd[1737]: notice: executing - rsc:stonith-sbd action:stop call_id:7
+2019-03-22T10:43:08.621463+08:00 15sp1-1 pacemaker-execd[1737]: notice: finished - rsc:stonith-sbd action:stop call_id:7 exit-code:0 exec-time:1ms queue-time:0ms
+2019-03-22T10:54:17.948621+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation stonith-sbd_start_0 locally on 15sp1-1
+2019-03-22T10:54:17.948709+08:00 15sp1-1 pacemaker-execd[1737]: notice: executing - rsc:stonith-sbd action:start call_id:42
+2019-03-22T10:54:19.157468+08:00 15sp1-1 pacemaker-execd[1737]: notice: finished - rsc:stonith-sbd action:start call_id:42 exit-code:0 exec-time:1209ms queue-time:0ms
+2019-03-22T10:54:48.477139+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:start call_id:38
+2019-03-22T10:54:48.492428+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:start call_id:38 pid:2141 exit-code:0 exec-time:15ms queue-time:0ms
+2019-03-22T10:54:48.496863+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d2 action:start call_id:39
+2019-03-22T10:54:48.510603+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d2 action:start call_id:39 pid:2145 exit-code:0 exec-time:14ms queue-time:0ms
+2019-03-22T10:54:48.455384+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation d1_start_0 on 15sp1-2
+2019-03-22T10:54:48.474653+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation d2_start_0 on 15sp1-2
+2019-03-22T10:54:58.218867+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d2 action:stop call_id:40
+2019-03-22T10:54:58.234531+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d2 action:stop call_id:40 pid:2150 exit-code:0 exec-time:16ms queue-time:0ms
+2019-03-22T10:54:58.240981+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:stop call_id:41
+2019-03-22T10:54:58.256143+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:stop call_id:41 pid:2154 exit-code:0 exec-time:16ms queue-time:0ms
+2019-03-22T10:54:58.196862+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation d2_stop_0 on 15sp1-2
+2019-03-22T10:54:58.219353+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation d1_stop_0 on 15sp1-2
+2019-03-22T11:00:42.659431+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation stonith-sbd_stop_0 locally on 15sp1-1
+2019-03-22T11:00:42.660180+08:00 15sp1-1 pacemaker-execd[1737]: notice: executing - rsc:stonith-sbd action:stop call_id:58
+2019-03-22T11:00:42.660574+08:00 15sp1-1 pacemaker-execd[1737]: notice: finished - rsc:stonith-sbd action:stop call_id:58 exit-code:0 exec-time:0ms queue-time:0ms
+2019-03-22T11:00:42.661106+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation stonith-sbd_start_0 on 15sp1-2
+2019-03-22T11:00:42.660196+08:00 15sp1-2 pacemaker-execd[1745]: notice: executing - rsc:stonith-sbd action:start call_id:14
+2019-03-22T11:00:43.862608+08:00 15sp1-2 pacemaker-execd[1745]: notice: finished - rsc:stonith-sbd action:start call_id:14 exit-code:0 exec-time:1202ms queue-time:0ms
+2019-03-22T11:03:05.194349+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation d1_start_0 on 15sp1-1
+2019-03-22T11:03:05.233648+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation d2_start_0 on 15sp1-1
+2019-03-22T11:03:05.193318+08:00 15sp1-1 pacemaker-execd[1746]: notice: executing - rsc:d1 action:start call_id:21
+2019-03-22T11:03:05.226554+08:00 15sp1-1 pacemaker-execd[1746]: notice: finished - rsc:d1 action:start call_id:21 pid:1848 exit-code:0 exec-time:33ms queue-time:0ms
+2019-03-22T11:03:05.232910+08:00 15sp1-1 pacemaker-execd[1746]: notice: executing - rsc:d2 action:start call_id:22
+2019-03-22T11:03:05.246921+08:00 15sp1-1 pacemaker-execd[1746]: notice: finished - rsc:d2 action:start call_id:22 pid:1852 exit-code:0 exec-time:14ms queue-time:0ms
+2019-03-22T11:45:14.806899+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation stonith-sbd_start_0 on 15sp1-1
+2019-03-22T11:45:14.805511+08:00 15sp1-1 pacemaker-execd[1746]: notice: executing - rsc:stonith-sbd action:start call_id:34
+2019-03-22T11:45:16.071026+08:00 15sp1-1 pacemaker-execd[1746]: notice: finished - rsc:stonith-sbd action:start call_id:34 exit-code:0 exec-time:1266ms queue-time:0ms
+2019-03-22T11:46:15.742947+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation stonith-sbd_start_0 locally on 15sp1-2
+2019-03-22T11:46:15.743031+08:00 15sp1-2 pacemaker-execd[1745]: notice: executing - rsc:stonith-sbd action:start call_id:45
+2019-03-22T11:46:16.907002+08:00 15sp1-2 pacemaker-execd[1745]: notice: finished - rsc:stonith-sbd action:start call_id:45 exit-code:0 exec-time:1165ms queue-time:0ms
+.INP: node 15sp1-1
+2019-03-22T10:34:47.224345+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:47.602896+08:00 15sp1-1 corosync[1656]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:48.151543+08:00 15sp1-1 corosync[1638]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:48.153049+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:36:11.038712+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:36:11.045811+08:00 15sp1-1 corosync[2731]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:36:11.314757+08:00 15sp1-1 corosync[1681]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:36:12.057745+08:00 15sp1-1 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:39:25.658818+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:39:25.747937+08:00 15sp1-1 corosync[2907]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:39:26.283560+08:00 15sp1-1 corosync[2895]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:39:26.284460+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:20.133830+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:20.417231+08:00 15sp1-1 corosync[1596]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:20.966749+08:00 15sp1-1 corosync[1570]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:20.967453+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T11:00:43.905492+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T11:01:22.108074+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T11:01:22.240699+08:00 15sp1-1 corosync[1604]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T11:01:22.822920+08:00 15sp1-1 corosync[1581]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T11:01:22.823827+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+.INP: node 15sp1-2
+2019-03-22T10:34:56.014052+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:56.294554+08:00 15sp1-2 corosync[1660]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:56.851006+08:00 15sp1-2 corosync[1639]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:56.854249+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:37:14.300560+08:00 15sp1-2 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:37:14.308907+08:00 15sp1-2 corosync[2762]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:37:14.526591+08:00 15sp1-2 corosync[1674]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:37:15.321200+08:00 15sp1-2 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:40:35.393413+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:40:35.482538+08:00 15sp1-2 corosync[2951]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:40:36.016624+08:00 15sp1-2 corosync[2939]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:40:36.017584+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:30.288757+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:30.455372+08:00 15sp1-2 corosync[1594]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:31.022153+08:00 15sp1-2 corosync[1569]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:31.022858+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.419697+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Scheduling Node 15sp1-2 for STONITH
+2019-03-22T10:57:47.129510+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:57:47.345204+08:00 15sp1-2 corosync[1605]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:57:47.940808+08:00 15sp1-2 corosync[1578]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:57:47.941515+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+.INP: node .*
+2019-03-22T10:34:47.224345+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:47.602896+08:00 15sp1-1 corosync[1656]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:48.151543+08:00 15sp1-1 corosync[1638]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:48.153049+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:34:56.014052+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:56.294554+08:00 15sp1-2 corosync[1660]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:56.851006+08:00 15sp1-2 corosync[1639]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:56.854249+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:36:11.038712+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:36:11.045811+08:00 15sp1-1 corosync[2731]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:36:11.314757+08:00 15sp1-1 corosync[1681]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:36:12.057745+08:00 15sp1-1 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:37:14.300560+08:00 15sp1-2 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:37:14.308907+08:00 15sp1-2 corosync[2762]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:37:14.526591+08:00 15sp1-2 corosync[1674]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:37:15.321200+08:00 15sp1-2 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:39:25.658818+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:39:25.747937+08:00 15sp1-1 corosync[2907]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:39:26.283560+08:00 15sp1-1 corosync[2895]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:39:26.284460+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:40:35.393413+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:40:35.482538+08:00 15sp1-2 corosync[2951]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:40:36.016624+08:00 15sp1-2 corosync[2939]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:40:36.017584+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:20.133830+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:20.417231+08:00 15sp1-1 corosync[1596]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:20.966749+08:00 15sp1-1 corosync[1570]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:20.967453+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:30.288757+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:30.455372+08:00 15sp1-2 corosync[1594]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:31.022153+08:00 15sp1-2 corosync[1569]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:31.022858+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.419697+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Scheduling Node 15sp1-2 for STONITH
+2019-03-22T10:57:47.129510+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:57:47.345204+08:00 15sp1-2 corosync[1605]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:57:47.940808+08:00 15sp1-2 corosync[1578]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:57:47.941515+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T11:00:43.905492+08:00 15sp1-1 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T11:01:22.108074+08:00 15sp1-1 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T11:01:22.240699+08:00 15sp1-1 corosync[1604]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T11:01:22.822920+08:00 15sp1-1 corosync[1581]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T11:01:22.823827+08:00 15sp1-1 systemd[1]: Started Corosync Cluster Engine.
+.INP: exclude pcmk_peer_update
+.INP: exclude
+pcmk_peer_update
+.INP: node 15sp1-2
+2019-03-22T10:34:56.014052+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:34:56.294554+08:00 15sp1-2 corosync[1660]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:34:56.851006+08:00 15sp1-2 corosync[1639]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:34:56.854249+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:37:14.300560+08:00 15sp1-2 systemd[1]: Stopping Corosync Cluster Engine...
+2019-03-22T10:37:14.308907+08:00 15sp1-2 corosync[2762]: Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
+2019-03-22T10:37:14.526591+08:00 15sp1-2 corosync[1674]: [MAIN ] Corosync Cluster Engine exiting normally
+2019-03-22T10:37:15.321200+08:00 15sp1-2 systemd[1]: Stopped Corosync Cluster Engine.
+2019-03-22T10:40:35.393413+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:40:35.482538+08:00 15sp1-2 corosync[2951]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:40:36.016624+08:00 15sp1-2 corosync[2939]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:40:36.017584+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:42:30.288757+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:42:30.455372+08:00 15sp1-2 corosync[1594]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:42:31.022153+08:00 15sp1-2 corosync[1569]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:42:31.022858+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.416215+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Cluster node 15sp1-2 will be fenced: termination was requested
+2019-03-22T10:56:55.419697+08:00 15sp1-1 pacemaker-schedulerd[1739]: warning: Scheduling Node 15sp1-2 for STONITH
+2019-03-22T10:57:47.129510+08:00 15sp1-2 systemd[1]: Starting Corosync Cluster Engine...
+2019-03-22T10:57:47.345204+08:00 15sp1-2 corosync[1605]: [MAIN ] Corosync Cluster Engine ('2.4.4'): started and ready to provide service.
+2019-03-22T10:57:47.940808+08:00 15sp1-2 corosync[1578]: Starting Corosync Cluster Engine (corosync): [ OK ]
+2019-03-22T10:57:47.941515+08:00 15sp1-2 systemd[1]: Started Corosync Cluster Engine.
+.INP: exclude clear
+.INP: exclude corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd
+.INP: exclude clear
+.INP: peinputs
+history-test/15sp1-2/pengine/pe-input-3.bz2
+history-test/15sp1-2/pengine/pe-input-4.bz2
+history-test/15sp1-2/pengine/pe-input-5.bz2
+history-test/15sp1-1/pengine/pe-input-4.bz2
+history-test/15sp1-1/pengine/pe-input-5.bz2
+history-test/15sp1-1/pengine/pe-input-6.bz2
+history-test/15sp1-1/pengine/pe-input-7.bz2
+history-test/15sp1-1/pengine/pe-input-8.bz2
+history-test/15sp1-2/pengine/pe-input-7.bz2
+history-test/15sp1-2/pengine/pe-input-8.bz2
+history-test/15sp1-1/pengine/pe-input-9.bz2
+history-test/15sp1-1/pengine/pe-input-10.bz2
+history-test/15sp1-1/pengine/pe-input-11.bz2
+history-test/15sp1-1/pengine/pe-input-12.bz2
+history-test/15sp1-1/pengine/pe-input-13.bz2
+history-test/15sp1-1/pengine/pe-input-14.bz2
+history-test/15sp1-1/pengine/pe-input-15.bz2
+history-test/15sp1-1/pengine/pe-input-16.bz2
+history-test/15sp1-1/pengine/pe-input-17.bz2
+history-test/15sp1-1/pengine/pe-input-18.bz2
+history-test/15sp1-1/pengine/pe-input-19.bz2
+history-test/15sp1-1/pengine/pe-input-20.bz2
+history-test/15sp1-1/pengine/pe-input-21.bz2
+history-test/15sp1-1/pengine/pe-input-23.bz2
+history-test/15sp1-1/pengine/pe-input-24.bz2
+history-test/15sp1-1/pengine/pe-input-25.bz2
+history-test/15sp1-1/pengine/pe-input-26.bz2
+history-test/15sp1-1/pengine/pe-input-27.bz2
+history-test/15sp1-1/pengine/pe-input-28.bz2
+history-test/15sp1-1/pengine/pe-input-30.bz2
+history-test/15sp1-1/pengine/pe-input-31.bz2
+history-test/15sp1-1/pengine/pe-input-32.bz2
+history-test/15sp1-1/pengine/pe-input-33.bz2
+history-test/15sp1-1/pengine/pe-input-34.bz2
+history-test/15sp1-1/pengine/pe-input-36.bz2
+history-test/15sp1-1/pengine/pe-input-37.bz2
+history-test/15sp1-1/pengine/pe-input-38.bz2
+history-test/15sp1-1/pengine/pe-input-39.bz2
+history-test/15sp1-1/pengine/pe-input-40.bz2
+history-test/15sp1-1/pengine/pe-input-41.bz2
+history-test/15sp1-1/pengine/pe-input-42.bz2
+history-test/15sp1-1/pengine/pe-input-43.bz2
+history-test/15sp1-1/pengine/pe-input-44.bz2
+history-test/15sp1-1/pengine/pe-input-45.bz2
+history-test/15sp1-1/pengine/pe-input-46.bz2
+history-test/15sp1-1/pengine/pe-warn-0.bz2
+history-test/15sp1-1/pengine/pe-input-48.bz2
+history-test/15sp1-1/pengine/pe-input-49.bz2
+history-test/15sp1-2/pengine/pe-input-11.bz2
+history-test/15sp1-2/pengine/pe-input-12.bz2
+history-test/15sp1-2/pengine/pe-input-13.bz2
+history-test/15sp1-2/pengine/pe-input-15.bz2
+history-test/15sp1-2/pengine/pe-input-16.bz2
+history-test/15sp1-2/pengine/pe-input-18.bz2
+history-test/15sp1-2/pengine/pe-input-19.bz2
+.INP: peinputs v
+Date Start End Filename Client User Origin Tags
+==== ===== === ======== ====== ==== ====== ====
+2019-03-22 18:35:11 18:35:11 pe-input-3 crmd hacluster 15sp1-1
+2019-03-22 18:36:10 18:36:10 pe-input-4 crmd hacluster 15sp1-1
+2019-03-22 18:37:14 18:37:14 pe-input-5 crmd hacluster 15sp1-1
+2019-03-22 18:39:51 18:39:51 pe-input-4 crmd hacluster 15sp1-1
+2019-03-22 18:39:55 18:39:57 pe-input-5 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 18:39:57 18:39:57 pe-input-6 cibadmin root 15sp1-1
+2019-03-22 18:40:41 18:40:41 pe-input-7 cibadmin root 15sp1-1
+2019-03-22 18:41:13 18:41:15 pe-input-8 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 18:41:16 --:--:-- pe-input-7 crmd hacluster 15sp1-1
+2019-03-22 18:41:16 18:41:16 pe-input-8 crmd hacluster 15sp1-1 stonith-sbd
+2019-03-22 18:42:44 18:42:46 pe-input-9 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 18:43:08 18:43:08 pe-input-10 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 18:43:23 18:43:23 pe-input-11 cibadmin root 15sp1-1
+2019-03-22 18:43:44 18:43:45 pe-input-12 cibadmin root 15sp1-1
+2019-03-22 18:44:29 18:44:29 pe-input-13 cibadmin root 15sp1-1
+2019-03-22 18:44:36 18:44:36 pe-input-14 cibadmin root 15sp1-1
+2019-03-22 18:44:38 18:44:38 pe-input-15 cibadmin root 15sp1-1
+2019-03-22 18:44:59 18:45:00 pe-input-16 cibadmin root 15sp1-1
+2019-03-22 18:45:14 18:45:14 pe-input-17 cibadmin root 15sp1-1
+2019-03-22 18:45:32 18:45:32 pe-input-18 cibadmin root 15sp1-1
+2019-03-22 18:45:37 18:45:37 pe-input-19 cibadmin root 15sp1-1
+2019-03-22 18:48:50 18:48:50 pe-input-20 cibadmin root 15sp1-1
+2019-03-22 18:48:51 --:--:-- pe-input-21 cibadmin root 15sp1-1
+2019-03-22 18:49:48 18:49:48 pe-input-23 cibadmin root 15sp1-1
+2019-03-22 18:49:53 18:49:53 pe-input-24 cibadmin root 15sp1-1
+2019-03-22 18:51:19 18:51:19 pe-input-25 cibadmin root 15sp1-1
+2019-03-22 18:51:39 18:51:39 pe-input-26 cibadmin root 15sp1-1
+2019-03-22 18:51:53 18:51:53 pe-input-27 cibadmin root 15sp1-1
+2019-03-22 18:51:54 --:--:-- pe-input-28 cibadmin root 15sp1-1
+2019-03-22 18:52:06 18:52:06 pe-input-30 cibadmin root 15sp1-1
+2019-03-22 18:52:25 18:52:25 pe-input-31 cibadmin root 15sp1-1
+2019-03-22 18:53:09 18:53:09 pe-input-32 cibadmin root 15sp1-1
+2019-03-22 18:53:15 18:53:15 pe-input-33 cibadmin root 15sp1-1
+2019-03-22 18:53:15 --:--:-- pe-input-34 cibadmin root 15sp1-1
+2019-03-22 18:54:08 18:54:08 pe-input-36 cibadmin root 15sp1-1
+2019-03-22 18:54:17 18:54:19 pe-input-37 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 18:54:48 18:54:48 pe-input-38 cibadmin root 15sp1-1 d1 d2
+2019-03-22 18:54:58 18:54:58 pe-input-39 cibadmin root 15sp1-1 d1 d2
+2019-03-22 18:56:18 18:56:19 pe-input-40 cibadmin root 15sp1-1 error
+2019-03-22 18:56:19 18:56:19 pe-input-41 cibadmin root 15sp1-1
+2019-03-22 18:56:19 18:56:19 pe-input-42 cibadmin root 15sp1-1 error
+2019-03-22 18:56:19 --:--:-- pe-input-43 cibadmin root 15sp1-1
+2019-03-22 18:56:19 18:56:19 pe-input-44 cibadmin root 15sp1-1 error
+2019-03-22 18:56:42 18:56:42 pe-input-45 cibadmin root 15sp1-1
+2019-03-22 18:56:43 --:--:-- pe-input-46 cibadmin root 15sp1-1
+2019-03-22 18:56:55 18:57:27 pe-warn-0 cibadmin root 15sp1-1
+2019-03-22 18:57:52 18:57:52 pe-input-48 cibadmin root 15sp1-1
+2019-03-22 19:00:42 19:00:43 pe-input-49 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 19:01:30 19:01:31 pe-input-11 cibadmin root 15sp1-1
+2019-03-22 19:02:24 19:02:24 pe-input-12 cibadmin root 15sp1-1
+2019-03-22 19:03:05 19:03:05 pe-input-13 cibadmin root 15sp1-1 d1 d2
+2019-03-22 19:45:14 19:45:16 pe-input-15 cibadmin root 15sp1-1 stonith-sbd
+2019-03-22 19:45:16 19:45:16 pe-input-16 cibadmin root 15sp1-1
+2019-03-22 19:46:15 19:46:15 pe-input-18 cibadmin root 15sp1-1
+2019-03-22 19:46:15 19:46:16 pe-input-19 cibadmin root 15sp1-1 stonith-sbd
+.INP: transitions
+Time Name Node Tags
+2019-03-22 18:35:11 - 18:35:11: pe-input-3 15sp1-2
+2019-03-22 18:36:10 - 18:36:10: pe-input-4 15sp1-2
+2019-03-22 18:37:14 - 18:37:14: pe-input-5 15sp1-2
+2019-03-22 18:39:51 - 18:39:51: pe-input-4 15sp1-1
+2019-03-22 18:39:55 - 18:39:57: pe-input-5 15sp1-1 stonith-sbd
+2019-03-22 18:39:57 - 18:39:57: pe-input-6 15sp1-1
+2019-03-22 18:40:41 - 18:40:41: pe-input-7 15sp1-1
+2019-03-22 18:41:13 - 18:41:15: pe-input-8 15sp1-1 stonith-sbd
+2019-03-22 18:41:16 - --:--:--: pe-input-7 15sp1-2
+2019-03-22 18:41:16 - 18:41:16: pe-input-8 15sp1-2 stonith-sbd
+2019-03-22 18:42:44 - 18:42:46: pe-input-9 15sp1-1 stonith-sbd
+2019-03-22 18:43:08 - 18:43:08: pe-input-10 15sp1-1 stonith-sbd
+2019-03-22 18:43:23 - 18:43:23: pe-input-11 15sp1-1
+2019-03-22 18:43:44 - 18:43:45: pe-input-12 15sp1-1
+2019-03-22 18:44:29 - 18:44:29: pe-input-13 15sp1-1
+2019-03-22 18:44:36 - 18:44:36: pe-input-14 15sp1-1
+2019-03-22 18:44:38 - 18:44:38: pe-input-15 15sp1-1
+2019-03-22 18:44:59 - 18:45:00: pe-input-16 15sp1-1
+2019-03-22 18:45:14 - 18:45:14: pe-input-17 15sp1-1
+2019-03-22 18:45:32 - 18:45:32: pe-input-18 15sp1-1
+2019-03-22 18:45:37 - 18:45:37: pe-input-19 15sp1-1
+2019-03-22 18:48:50 - 18:48:50: pe-input-20 15sp1-1
+2019-03-22 18:48:51 - --:--:--: pe-input-21 15sp1-1
+2019-03-22 18:49:48 - 18:49:48: pe-input-23 15sp1-1
+2019-03-22 18:49:53 - 18:49:53: pe-input-24 15sp1-1
+2019-03-22 18:51:19 - 18:51:19: pe-input-25 15sp1-1
+2019-03-22 18:51:39 - 18:51:39: pe-input-26 15sp1-1
+2019-03-22 18:51:53 - 18:51:53: pe-input-27 15sp1-1
+2019-03-22 18:51:54 - --:--:--: pe-input-28 15sp1-1
+2019-03-22 18:52:06 - 18:52:06: pe-input-30 15sp1-1
+2019-03-22 18:52:25 - 18:52:25: pe-input-31 15sp1-1
+2019-03-22 18:53:09 - 18:53:09: pe-input-32 15sp1-1
+2019-03-22 18:53:15 - 18:53:15: pe-input-33 15sp1-1
+2019-03-22 18:53:15 - --:--:--: pe-input-34 15sp1-1
+2019-03-22 18:54:08 - 18:54:08: pe-input-36 15sp1-1
+2019-03-22 18:54:17 - 18:54:19: pe-input-37 15sp1-1 stonith-sbd
+2019-03-22 18:54:48 - 18:54:48: pe-input-38 15sp1-1 d1 d2
+2019-03-22 18:54:58 - 18:54:58: pe-input-39 15sp1-1 d1 d2
+2019-03-22 18:56:18 - 18:56:19: pe-input-40 15sp1-1 error
+2019-03-22 18:56:19 - 18:56:19: pe-input-41 15sp1-1
+2019-03-22 18:56:19 - 18:56:19: pe-input-42 15sp1-1 error
+2019-03-22 18:56:19 - --:--:--: pe-input-43 15sp1-1
+2019-03-22 18:56:19 - 18:56:19: pe-input-44 15sp1-1 error
+2019-03-22 18:56:42 - 18:56:42: pe-input-45 15sp1-1
+2019-03-22 18:56:43 - --:--:--: pe-input-46 15sp1-1
+2019-03-22 18:56:55 - 18:57:27: pe-warn-0 15sp1-1
+2019-03-22 18:57:52 - 18:57:52: pe-input-48 15sp1-1
+2019-03-22 19:00:42 - 19:00:43: pe-input-49 15sp1-1 stonith-sbd
+2019-03-22 19:01:30 - 19:01:31: pe-input-11 15sp1-2
+2019-03-22 19:02:24 - 19:02:24: pe-input-12 15sp1-2
+2019-03-22 19:03:05 - 19:03:05: pe-input-13 15sp1-2 d1 d2
+2019-03-22 19:45:14 - 19:45:16: pe-input-15 15sp1-2 stonith-sbd
+2019-03-22 19:45:16 - 19:45:16: pe-input-16 15sp1-2
+2019-03-22 19:46:15 - 19:46:15: pe-input-18 15sp1-2
+2019-03-22 19:46:15 - 19:46:16: pe-input-19 15sp1-2 stonith-sbd
+.INP: refresh
+Refreshing log data...
+55 transitions, 116 events.
+.INP: resource d1
+2019-03-22T10:54:48.477139+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:start call_id:38
+2019-03-22T10:54:48.492428+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:start call_id:38 pid:2141 exit-code:0 exec-time:15ms queue-time:0ms
+2019-03-22T10:54:48.455384+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation d1_start_0 on 15sp1-2
+2019-03-22T10:54:58.240981+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:stop call_id:41
+2019-03-22T10:54:58.256143+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:stop call_id:41 pid:2154 exit-code:0 exec-time:16ms queue-time:0ms
+2019-03-22T10:54:58.219353+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation d1_stop_0 on 15sp1-2
+2019-03-22T11:03:05.194349+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation d1_start_0 on 15sp1-1
+2019-03-22T11:03:05.193318+08:00 15sp1-1 pacemaker-execd[1746]: notice: executing - rsc:d1 action:start call_id:21
+2019-03-22T11:03:05.226554+08:00 15sp1-1 pacemaker-execd[1746]: notice: finished - rsc:d1 action:start call_id:21 pid:1848 exit-code:0 exec-time:33ms queue-time:0ms
+.INP: # reduce report span
+.INP: timeframe "2019-03-22 15:07:37"
+WARNING: 20: This command 'timeframe' is deprecated, please use 'limit'
+INFO: 20: "timeframe" is accepted as "limit"
+.INP: peinputs
+history-test/15sp1-2/pengine/pe-input-3.bz2
+history-test/15sp1-2/pengine/pe-input-4.bz2
+history-test/15sp1-2/pengine/pe-input-5.bz2
+history-test/15sp1-1/pengine/pe-input-4.bz2
+history-test/15sp1-1/pengine/pe-input-5.bz2
+history-test/15sp1-1/pengine/pe-input-6.bz2
+history-test/15sp1-1/pengine/pe-input-7.bz2
+history-test/15sp1-1/pengine/pe-input-8.bz2
+history-test/15sp1-2/pengine/pe-input-7.bz2
+history-test/15sp1-2/pengine/pe-input-8.bz2
+history-test/15sp1-1/pengine/pe-input-9.bz2
+history-test/15sp1-1/pengine/pe-input-10.bz2
+history-test/15sp1-1/pengine/pe-input-11.bz2
+history-test/15sp1-1/pengine/pe-input-12.bz2
+history-test/15sp1-1/pengine/pe-input-13.bz2
+history-test/15sp1-1/pengine/pe-input-14.bz2
+history-test/15sp1-1/pengine/pe-input-15.bz2
+history-test/15sp1-1/pengine/pe-input-16.bz2
+history-test/15sp1-1/pengine/pe-input-17.bz2
+history-test/15sp1-1/pengine/pe-input-18.bz2
+history-test/15sp1-1/pengine/pe-input-19.bz2
+history-test/15sp1-1/pengine/pe-input-20.bz2
+history-test/15sp1-1/pengine/pe-input-21.bz2
+history-test/15sp1-1/pengine/pe-input-23.bz2
+history-test/15sp1-1/pengine/pe-input-24.bz2
+history-test/15sp1-1/pengine/pe-input-25.bz2
+history-test/15sp1-1/pengine/pe-input-26.bz2
+history-test/15sp1-1/pengine/pe-input-27.bz2
+history-test/15sp1-1/pengine/pe-input-28.bz2
+history-test/15sp1-1/pengine/pe-input-30.bz2
+history-test/15sp1-1/pengine/pe-input-31.bz2
+history-test/15sp1-1/pengine/pe-input-32.bz2
+history-test/15sp1-1/pengine/pe-input-33.bz2
+history-test/15sp1-1/pengine/pe-input-34.bz2
+history-test/15sp1-1/pengine/pe-input-36.bz2
+history-test/15sp1-1/pengine/pe-input-37.bz2
+history-test/15sp1-1/pengine/pe-input-38.bz2
+history-test/15sp1-1/pengine/pe-input-39.bz2
+history-test/15sp1-1/pengine/pe-input-40.bz2
+history-test/15sp1-1/pengine/pe-input-41.bz2
+history-test/15sp1-1/pengine/pe-input-42.bz2
+history-test/15sp1-1/pengine/pe-input-43.bz2
+history-test/15sp1-1/pengine/pe-input-44.bz2
+history-test/15sp1-1/pengine/pe-input-45.bz2
+history-test/15sp1-1/pengine/pe-input-46.bz2
+history-test/15sp1-1/pengine/pe-warn-0.bz2
+history-test/15sp1-1/pengine/pe-input-48.bz2
+history-test/15sp1-1/pengine/pe-input-49.bz2
+history-test/15sp1-2/pengine/pe-input-11.bz2
+history-test/15sp1-2/pengine/pe-input-12.bz2
+history-test/15sp1-2/pengine/pe-input-13.bz2
+history-test/15sp1-2/pengine/pe-input-15.bz2
+history-test/15sp1-2/pengine/pe-input-16.bz2
+history-test/15sp1-2/pengine/pe-input-18.bz2
+history-test/15sp1-2/pengine/pe-input-19.bz2
+.INP: resource d1
+2019-03-22T10:54:48.477139+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:start call_id:38
+2019-03-22T10:54:48.492428+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:start call_id:38 pid:2141 exit-code:0 exec-time:15ms queue-time:0ms
+2019-03-22T10:54:48.455384+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating start operation d1_start_0 on 15sp1-2
+2019-03-22T10:54:58.240981+08:00 15sp1-2 pacemaker-execd[1737]: notice: executing - rsc:d1 action:stop call_id:41
+2019-03-22T10:54:58.256143+08:00 15sp1-2 pacemaker-execd[1737]: notice: finished - rsc:d1 action:stop call_id:41 pid:2154 exit-code:0 exec-time:16ms queue-time:0ms
+2019-03-22T10:54:58.219353+08:00 15sp1-1 pacemaker-controld[1740]: notice: Initiating stop operation d1_stop_0 on 15sp1-2
+2019-03-22T11:03:05.194349+08:00 15sp1-2 pacemaker-controld[1748]: notice: Initiating start operation d1_start_0 on 15sp1-1
+2019-03-22T11:03:05.193318+08:00 15sp1-1 pacemaker-execd[1746]: notice: executing - rsc:d1 action:start call_id:21
+2019-03-22T11:03:05.226554+08:00 15sp1-1 pacemaker-execd[1746]: notice: finished - rsc:d1 action:start call_id:21 pid:1848 exit-code:0 exec-time:33ms queue-time:0ms
+.INP: exclude corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd
+.INP: transition log
+2019-03-22T11:46:15.797222+08:00 15sp1-2 sbd[2770]: notice: main: Doing flush + writing 'b' to sysrq on timeout
+2019-03-22T11:46:15.812786+08:00 15sp1-2 sbd[2774]: notice: main: Doing flush + writing 'b' to sysrq on timeout
+.INP: transition nograph
+INFO: 25: running ptest with history-test/15sp1-2/pengine/pe-input-19.bz2
+.EXT >/dev/null 2>&1 crm_simulate -x - -S -VV
+Transition 15sp1-2:pe-input-19 (19:46:15 - 19:46:16):
+ total 1 actions: 1 Complete
+.INP: transition -1 nograph
+INFO: 26: running ptest with history-test/15sp1-2/pengine/pe-input-18.bz2
+.EXT >/dev/null 2>&1 crm_simulate -x - -S -VV
+Transition 15sp1-2:pe-input-18 (19:46:15 - 19:46:15):
+ total 12 actions: 7 Complete, 1 Skipped, 4 Incomplete
+.INP: transition save 0 _crmsh_regtest
+INFO: 27: transition history-test/15sp1-2/pengine/pe-input-19.bz2 saved to shadow _crmsh_regtest
+.INP: transition log 49
+2019-03-22T11:00:42.614804+08:00 15sp1-1 systemd[1]: Stopped target Timers.
+2019-03-22T11:00:42.615759+08:00 15sp1-1 systemd[1]: Stopped Discard unused blocks once a week.
+2019-03-22T11:00:42.615966+08:00 15sp1-1 systemd[1]: Stopped Scrub btrfs filesystem, verify block checksums.
+2019-03-22T11:00:42.616312+08:00 15sp1-1 systemd[1]: Stopped target Sound Card.
+2019-03-22T11:00:42.616521+08:00 15sp1-1 systemd[1]: Stopped Daily Cleanup of Temporary Directories.
+2019-03-22T11:00:42.616840+08:00 15sp1-1 systemd[1]: Stopped target Multi-User System.
+2019-03-22T11:00:42.617530+08:00 15sp1-1 pacemakerd[1733]: notice: Caught 'Terminated' signal
+2019-03-22T11:00:42.617672+08:00 15sp1-1 pacemakerd[1733]: notice: Shutting down Pacemaker
+2019-03-22T11:00:42.635974+08:00 15sp1-1 systemd[1]: Stopping Pacemaker High Availability Cluster Manager...
+2019-03-22T11:00:42.640402+08:00 15sp1-1 systemd[1]: Stopped target Login Prompts.
+2019-03-22T11:00:42.649788+08:00 15sp1-1 systemd[1]: Stopping Session 1 of user root.
+2019-03-22T11:00:42.656415+08:00 15sp1-1 systemd[1]: Stopping OpenSSH Daemon...
+2019-03-22T11:00:42.659094+08:00 15sp1-1 systemd[1]: Stopped Detect if the system suffers from bsc#1089761.
+2019-03-22T11:00:42.660023+08:00 15sp1-1 systemd[1]: Stopped Timeline of Snapper Snapshots.
+2019-03-22T11:00:42.660434+08:00 15sp1-1 systemd[1]: Stopping Restore /run/initramfs on shutdown...
+2019-03-22T11:00:42.660712+08:00 15sp1-1 systemd[1]: Stopped Do daily mandb update.
+2019-03-22T11:00:42.660980+08:00 15sp1-1 systemd[1]: Stopped Check if mainboard battery is Ok.
+2019-03-22T11:00:42.661239+08:00 15sp1-1 systemd[1]: Stopped Early Kernel Boot Messages.
+2019-03-22T11:00:42.661471+08:00 15sp1-1 systemd[1]: Stopped Apply settings from /etc/sysconfig/keyboard.
+2019-03-22T11:00:42.661722+08:00 15sp1-1 systemd[1]: Closed LVM2 poll daemon socket.
+2019-03-22T11:00:42.661854+08:00 15sp1-1 systemd[1]: Stopped Backup of RPM database.
+2019-03-22T11:00:42.661990+08:00 15sp1-1 systemd[1]: Stopped Backup of /etc/sysconfig.
+2019-03-22T11:00:42.663466+08:00 15sp1-2 systemd[1]: Started Timeline of Snapper Snapshots.
+2019-03-22T11:00:42.673313+08:00 15sp1-1 systemd[1766]: Stopped target Default.
+2019-03-22T11:00:42.673554+08:00 15sp1-1 systemd[1766]: Stopped target Basic System.
+2019-03-22T11:00:42.673738+08:00 15sp1-1 systemd[1766]: Stopped target Sockets.
+2019-03-22T11:00:42.673880+08:00 15sp1-1 systemd[1766]: Closed D-Bus User Message Bus Socket.
+2019-03-22T11:00:42.674004+08:00 15sp1-1 systemd[1766]: Stopped target Paths.
+2019-03-22T11:00:42.674122+08:00 15sp1-1 systemd[1766]: Reached target Shutdown.
+2019-03-22T11:00:42.674236+08:00 15sp1-1 systemd[1766]: Stopped target Timers.
+2019-03-22T11:00:42.674360+08:00 15sp1-1 systemd[1766]: Starting Exit the Session...
+2019-03-22T11:00:42.674478+08:00 15sp1-1 systemd[1]: Stopping User Manager for UID 0...
+2019-03-22T11:00:42.674594+08:00 15sp1-1 systemd[1]: Stopped Balance block groups on a btrfs filesystem.
+2019-03-22T11:00:42.674701+08:00 15sp1-1 systemd[1]: Stopping iSCSI UserSpace I/O driver...
+2019-03-22T11:00:42.674806+08:00 15sp1-1 systemd[1]: Stopping Getty on tty1...
+2019-03-22T11:00:42.674911+08:00 15sp1-1 systemd[1]: Stopping Command Scheduler...
+2019-03-22T11:00:42.675020+08:00 15sp1-1 systemd[1]: Stopped Daily rotation of log files.
+2019-03-22T11:00:42.675126+08:00 15sp1-1 systemd[1]: Stopped Daily Cleanup of Snapper Snapshots.
+2019-03-22T11:00:42.675231+08:00 15sp1-1 systemd[1]: Removed slice system-systemd\x2dhibernate\x2dresume.slice.
+2019-03-22T11:00:42.675345+08:00 15sp1-1 systemd[1]: Stopped iSCSI UserSpace I/O driver.
+2019-03-22T11:00:42.675452+08:00 15sp1-1 systemd[1]: Stopped OpenSSH Daemon.
+2019-03-22T11:00:42.675561+08:00 15sp1-1 systemd[1]: Stopped Session 1 of user root.
+2019-03-22T11:00:42.683003+08:00 15sp1-1 systemd-logind[819]: Session 1 logged out. Waiting for processes to exit.
+2019-03-22T11:00:42.683239+08:00 15sp1-1 systemd[1]: Stopped Getty on tty1.
+2019-03-22T11:00:42.683375+08:00 15sp1-1 systemd[1]: Stopped Restore /run/initramfs on shutdown.
+2019-03-22T11:00:42.683487+08:00 15sp1-1 systemd-logind[819]: Removed session 1.
+2019-03-22T11:00:42.683603+08:00 15sp1-1 systemd[1]: Starting Show Plymouth Reboot Screen...
+2019-03-22T11:00:42.683861+08:00 15sp1-1 systemd[1]: Removed slice system-getty.slice.
+2019-03-22T11:00:42.686592+08:00 15sp1-1 systemd[1]: Received SIGRTMIN+20 from PID 5230 (plymouthd).
+2019-03-22T11:00:42.687482+08:00 15sp1-2 dbus-daemon[768]: [system] Activating service name='org.opensuse.Snapper' requested by ':1.13' (uid=0 pid=1835 comm="/usr/lib/snapper/systemd-helper --timeline ") (using servicehelper)
+2019-03-22T11:00:42.687871+08:00 15sp1-1 cron[1730]: (CRON) INFO (Shutting down)
+2019-03-22T11:00:42.689646+08:00 15sp1-1 systemd[1]: Stopped Command Scheduler.
+2019-03-22T11:00:42.689784+08:00 15sp1-1 systemd[1]: Stopping Postfix Mail Transport Agent...
+2019-03-22T11:00:42.705412+08:00 15sp1-2 dbus-daemon[768]: [system] Successfully activated service 'org.opensuse.Snapper'
+2019-03-22T11:00:42.745173+08:00 15sp1-2 sbd[1847]: notice: main: Doing flush + writing 'b' to sysrq on timeout
+2019-03-22T11:00:42.760480+08:00 15sp1-2 sbd[1851]: notice: main: Doing flush + writing 'b' to sysrq on timeout
+2019-03-22T11:00:42.765095+08:00 15sp1-1 systemd[1]: Stopped Postfix Mail Transport Agent.
+2019-03-22T11:00:42.765239+08:00 15sp1-1 systemd[1]: Stopped target Host and Network Name Lookups.
+.INP: transition tags 49
+stonith-sbd
+.INP: # reset timeframe
+.INP: timeframe
+WARNING: 31: This command 'timeframe' is deprecated, please use 'limit'
+INFO: 31: "timeframe" is accepted as "limit"
+.INP: session save _crmsh_regtest
+.INP: session load _crmsh_regtest
+.INP: session
+current session: _crmsh_regtest
+.INP: session pack
+.TRY History 2
+.INP: history
+.INP: session load _crmsh_regtest
+.INP: exclude
+corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd
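The transcript above exercises most of the `crm history` sublevel against a pre-unpacked crm_report archive. As a rough sketch, the session it replays looks like the following; the command names are taken from the `.INP:` lines above, while the `source` step is an assumption about how the `history-test` directory is loaded:

    crm history
    source history-test           # point the explorer at the unpacked report
    info                          # period, nodes, resources, transitions
    events                        # notable errors/warnings across all logs
    node 15sp1-1                  # per-node log view
    resource d1                   # per-resource log view
    exclude pcmk_peer_update      # filter log lines by regex
    peinputs v                    # verbose pe-input listing
    transition -1 nograph         # replay a transition via crm_simulate
    limit "2019-03-22 15:07:37"   # narrow the report span (ex-'timeframe')
    session save _crmsh_regtest   # persist filters/span for 'session load'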
diff --git a/test/testcases/history.post b/test/testcases/history.post
new file mode 100755
index 0000000..b5bb7fc
--- /dev/null
+++ b/test/testcases/history.post
@@ -0,0 +1,3 @@
+#!/bin/sh
+crm history session delete _crmsh_regtest
+rm -r history-test
diff --git a/test/testcases/history.pre b/test/testcases/history.pre
new file mode 100755
index 0000000..4905f13
--- /dev/null
+++ b/test/testcases/history.pre
@@ -0,0 +1,3 @@
+#!/bin/sh
+crm history session delete _crmsh_regtest
+rm -rf history-test
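Each testcase can ship optional `<name>.pre` and `<name>.post` shell hooks; both variants here delete the `_crmsh_regtest` shadow session and the unpacked `history-test` report. Note the `.pre` uses `rm -rf` so a missing directory is not an error, while `.post` runs after the test has created it. A hypothetical driver loop, purely to illustrate the convention (the real regression driver may differ):

    #!/bin/sh
    # run one testcase with its optional pre/post hooks (sketch)
    name=$1
    [ -x "testcases/$name.pre" ] && "testcases/$name.pre"
    crm -f "testcases/$name" >"$name.out" 2>&1
    [ -x "testcases/$name.post" ] && "testcases/$name.post"
    diff -u "testcases/$name.exp" "$name.out"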
diff --git a/test/testcases/newfeatures b/test/testcases/newfeatures
new file mode 100644
index 0000000..5723625
--- /dev/null
+++ b/test/testcases/newfeatures
@@ -0,0 +1,44 @@
+session New features
+configure
+# erase to start from scratch
+erase
+erase nodes
+node node1
+# create one stonith so that verify does not complain
+primitive st stonith:ssh \
+ params hostlist='node1' \
+ meta target-role="Started" requires=nothing \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s
+primitive p0 Dummy params $p0-state:state=1
+primitive p1 Dummy params \
+ rule role=Started date in start=2009-05-26 end=2010-05-26 or date gt 2014-01-01 \
+ state=2
+primitive p2 Dummy params @p0-state
+property rule #uname eq node1 stonith-enabled=no
+tag tag1: p0 p1 p2
+tag tag2 p0 p1 p2
+location l1 { p0 p1 p2 } inf: node1
+primitive node1 Dummy
+tag ones l1 p1
+alert notify_9 /usr/share/pacemaker/alerts/alert_snmp.sh \
+ attributes \
+ trap_add_hires_timestamp_oid="false" \
+ trap_node_states="non-trap" \
+ trap_resource_tasks="start,stop,monitor,promote,demote" \
+ to "192.168.40.9"
+alert notify_10 /usr/share/pacemaker/alerts/alert_snmp.sh \
+ attributes \
+ trap_add_hires_timestamp_oid="false" \
+ select attributes { master-prmStateful test1 } \
+ to 192.168.28.188
+alert notify_11 /usr/share/pacemaker/alerts/alert_snmp.sh \
+ select fencing nodes resources \
+ to 192.168.28.188
+show tag:ones and type:location
+show tag:ones and p1
+show
+_test
+verify
+commit
+.
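The `newfeatures` case packs several newer configure constructs into one file: rule expressions inside `params`, id'd nvpairs and references to them, rule-based properties, tags, and alert `select` clauses. The id/reference pair is the least obvious; as the case appears to show, the `$<id>:` prefix names an nvpair so another primitive can reuse it:

    # '$p0-state:' assigns the nvpair id "p0-state", and '@p0-state' in p2
    # references that same nvpair by id rather than copying its value
    primitive p0 Dummy params $p0-state:state=1
    primitive p2 Dummy params @p0-state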
diff --git a/test/testcases/newfeatures.exp b/test/testcases/newfeatures.exp
new file mode 100644
index 0000000..897f315
--- /dev/null
+++ b/test/testcases/newfeatures.exp
@@ -0,0 +1,81 @@
+.TRY New features
+.INP: configure
+.INP: # erase to start from scratch
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: # create one stonith so that verify does not complain
+.INP: primitive st stonith:ssh params hostlist='node1' meta target-role="Started" requires=nothing op start timeout=60s op monitor interval=60m timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive p0 Dummy params $p0-state:state=1
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: primitive p1 Dummy params rule role=Started date in start=2009-05-26 end=2010-05-26 or date gt 2014-01-01 state=2
+.INP: primitive p2 Dummy params @p0-state
+.INP: property rule #uname eq node1 stonith-enabled=no
+.INP: tag tag1: p0 p1 p2
+.INP: tag tag2 p0 p1 p2
+.INP: location l1 { p0 p1 p2 } inf: node1
+.INP: primitive node1 Dummy
+.INP: tag ones l1 p1
+.INP: alert notify_9 /usr/share/pacemaker/alerts/alert_snmp.sh attributes trap_add_hires_timestamp_oid="false" trap_node_states="non-trap" trap_resource_tasks="start,stop,monitor,promote,demote" to "192.168.40.9"
+.INP: alert notify_10 /usr/share/pacemaker/alerts/alert_snmp.sh attributes trap_add_hires_timestamp_oid="false" select attributes { master-prmStateful test1 } to 192.168.28.188
+.INP: alert notify_11 /usr/share/pacemaker/alerts/alert_snmp.sh select fencing nodes resources to 192.168.28.188
+.INP: show tag:ones and type:location
+location l1 { p0 p1 p2 } inf: node1
+.INP: show tag:ones and p1
+primitive p1 Dummy \
+ params rule $role=Started date in start=2009-05-26 end=2010-05-26 or date gt 2014-01-01 state=2 \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+.INP: show
+node node1
+primitive node1 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p0 Dummy \
+ params state=1 \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p1 Dummy \
+ params rule $role=Started date in start=2009-05-26 end=2010-05-26 or date gt 2014-01-01 state=2 \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive p2 Dummy \
+ params @p0-state \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ meta target-role=Started requires=nothing \
+ op start timeout=60s interval=0s \
+ op monitor interval=60m timeout=60s \
+ op stop timeout=15 interval=0s
+tag ones l1 p1
+tag tag1 p0 p1 p2
+tag tag2 p0 p1 p2
+location l1 { p0 p1 p2 } inf: node1
+property cib-bootstrap-options: \
+ rule #uname eq node1 \
+ stonith-enabled=no
+alert notify_10 "/usr/share/pacemaker/alerts/alert_snmp.sh" \
+ attributes trap_add_hires_timestamp_oid=false \
+ select attributes { master-prmStateful test1 } \
+ to 192.168.28.188
+alert notify_11 "/usr/share/pacemaker/alerts/alert_snmp.sh" \
+ select fencing nodes resources \
+ to 192.168.28.188
+alert notify_9 "/usr/share/pacemaker/alerts/alert_snmp.sh" \
+ attributes trap_add_hires_timestamp_oid=false trap_node_states=non-trap trap_resource_tasks="start,stop,monitor,promote,demote" \
+ to 192.168.40.9
+.INP: _test
+.INP: verify
+.EXT crmd metadata
+.EXT pengine metadata
+.EXT cib metadata
+.INP: commit
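In the `.exp` transcripts, `.TRY` marks a test step, `.INP:` echoes each input line, and `.EXT` records every external command crmsh shelled out to (metadata lookups and the like), so the fixtures double as a dependency inventory. For instance, to list the external invocations a case relies on:

    # list distinct external commands recorded in an .exp fixture
    grep '^\.EXT ' test/testcases/newfeatures.exp | sort -u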
diff --git a/test/testcases/node b/test/testcases/node
new file mode 100644
index 0000000..f0a5fc1
--- /dev/null
+++ b/test/testcases/node
@@ -0,0 +1,14 @@
+node show
+node show node1
+%setenv showobj=node1
+configure primitive p5 Dummy
+configure group g0 p5
+resource maintenance g0
+resource maintenance p5
+-F node maintenance node1
+node ready node1
+node attribute node1 set a1 "1 2 3"
+node attribute node1 show a1
+node attribute node1 delete a1
+node clearstate node1
+
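The `node` case leans on a harness feature: `%setenv showobj=node1` makes the driver dump `show xml node1` after every step, which is why `node.exp` below repeats the CIB snapshot so often. Outside the harness, the same attribute round-trip would be (assuming a live cluster):

    crm node attribute node1 set a1 "1 2 3"   # -> crm_attribute -t nodes -N node1 ...
    crm node attribute node1 show a1
    crm node attribute node1 delete a1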
diff --git a/test/testcases/node.exp b/test/testcases/node.exp
new file mode 100644
index 0000000..d91c33c
--- /dev/null
+++ b/test/testcases/node.exp
@@ -0,0 +1,204 @@
+.TRY node show
+node1: member
+.TRY node show node1
+node1: member
+.SETENV showobj=node1
+.TRY configure primitive p5 Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1"/>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY configure group g0 p5
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1"/>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource maintenance g0
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1"/>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource maintenance p5
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1"/>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY -F node maintenance node1
+INFO: 'maintenance' attribute already exists in p5. Remove it? [YES]
+INFO: 'maintenance' attribute already exists in g0. Remove it? [YES]
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="true"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY node ready node1
+.EXT crm_attribute -t nodes -N 'node1' -n maintenance -v 'off'
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="off"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY node attribute node1 set a1 "1 2 3"
+.EXT crm_attribute -t nodes -N 'node1' -n 'a1' -v '1 2 3'
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="off"/>
+ <nvpair id="nodes-node1-a1" name="a1" value="1 2 3"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY node attribute node1 show a1
+.EXT crm_attribute -G -t nodes -N 'node1' -n 'a1'
+scope=nodes name=a1 value=1 2 3
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="off"/>
+ <nvpair id="nodes-node1-a1" name="a1" value="1 2 3"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY node attribute node1 delete a1
+.EXT crm_attribute -D -t nodes -N 'node1' -n 'a1'
+Deleted nodes attribute: id=nodes-node1-a1 name=a1
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="off"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY node clearstate node1
+.INP: configure
+.INP: _regtest on
+.INP: show xml node1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1">
+ <instance_attributes id="nodes-node1">
+ <nvpair id="nodes-node1-maintenance" name="maintenance" value="off"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+</cib>
+
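Note how `.TRY -F node maintenance node1` proceeds straight through the two "already exists ... Remove it? [YES]" prompts: the `-F`/`--force` flag answers them automatically, which the harness depends on since there is no interactive terminal. Equivalently, from a shell:

    crm -F node maintenance node1   # drops the p5/g0 maintenance attrs, then sets the node's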
diff --git a/test/testcases/options b/test/testcases/options
new file mode 100644
index 0000000..44f331b
--- /dev/null
+++ b/test/testcases/options
@@ -0,0 +1,23 @@
+session Options
+options
+reset
+pager cat
+editor vi
+show
+check-frequency never
+check-mode nosuchever
+colorscheme normal,yellow,cyan,red,green,magenta
+colorscheme normal,yellow,cyan,red
+pager nosuchprogram
+skill-level operator
+skill-level joe
+skill-level expert
+output plain
+output misplain
+wait true
+wait off
+wait happy
+show
+save
+.
+options show
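The `options` case walks each user preference through a valid and an invalid value, then `save`s; the final top-level `options show` (after the session block's terminating `.`) verifies that the saved values survive a fresh invocation. Scripted non-interactively, the happy path is just (sketch):

    crm options check-frequency never
    crm options output plain
    crm options save   # persist, so a later 'crm options show' matches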
diff --git a/test/testcases/options.exp b/test/testcases/options.exp
new file mode 100644
index 0000000..f13d308
--- /dev/null
+++ b/test/testcases/options.exp
@@ -0,0 +1,64 @@
+.TRY Options
+.INP: options
+.INP: reset
+.INP: pager cat
+.INP: editor vi
+.INP: show
+editor "vi"
+pager "cat"
+user ""
+skill-level "expert"
+output "color"
+colorscheme "yellow,normal,cyan,red,green,magenta"
+sort-elements "yes"
+check-frequency "always"
+check-mode "strict"
+wait "no"
+add-quotes "yes"
+manage-children "ask"
+.INP: check-frequency never
+.INP: check-mode nosuchever
+ERROR: nosuchever not valid (choose one from strict,relaxed)
+.INP: colorscheme normal,yellow,cyan,red,green,magenta
+.INP: colorscheme normal,yellow,cyan,red
+ERROR: bad color scheme: normal,yellow,cyan,red
+.INP: pager nosuchprogram
+ERROR: nosuchprogram does not exist or is not a program
+.INP: skill-level operator
+.INP: skill-level joe
+ERROR: joe not valid (choose one from operator,administrator,expert)
+.INP: skill-level expert
+.INP: output plain
+.INP: output misplain
+ERROR: misplain not valid (choose one from plain,color,uppercase)
+.INP: wait true
+.INP: wait off
+.INP: wait happy
+ERROR: happy not valid (yes or no are valid)
+.INP: show
+editor "vi"
+pager "cat"
+user ""
+skill-level "expert"
+output "plain"
+colorscheme "normal,yellow,cyan,red,green,magenta"
+sort-elements "yes"
+check-frequency "never"
+check-mode "strict"
+wait "off"
+add-quotes "yes"
+manage-children "ask"
+.INP: save
+.TRY options show
+editor "vi"
+pager "cat"
+user ""
+skill-level "expert"
+output "plain"
+colorscheme "normal,yellow,cyan,red,green,magenta"
+sort-elements "yes"
+check-frequency "never"
+check-mode "strict"
+wait "off"
+add-quotes "yes"
+manage-children "ask"
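The ERROR lines above come from enumerated-option validation: a value outside an option's set is rejected, and the message lists the valid choices. A hedged sketch of that pattern (the table and function are invented for illustration, not crmsh's internals):

# Sketch of the "choose one from ..." validation seen above.
CHOICES = {
    "check-mode": ("strict", "relaxed"),
    "skill-level": ("operator", "administrator", "expert"),
    "output": ("plain", "color", "uppercase"),
}

def set_option(opts, name, value):
    allowed = CHOICES.get(name)
    if allowed and value not in allowed:
        raise ValueError("%s not valid (choose one from %s)"
                         % (value, ",".join(allowed)))
    opts[name] = value

opts = {}
try:
    set_option(opts, "skill-level", "joe")
except ValueError as err:
    # prints: ERROR: joe not valid (choose one from operator,administrator,expert)
    print("ERROR: %s" % err)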
diff --git a/test/testcases/ra b/test/testcases/ra
new file mode 100644
index 0000000..bd44a3a
--- /dev/null
+++ b/test/testcases/ra
@@ -0,0 +1,7 @@
+session RA interface
+ra
+providers IPaddr
+providers Dummy
+info ocf:pacemaker:Dummy
+info stonith:external/ssh
+.
diff --git a/test/testcases/ra.exp b/test/testcases/ra.exp
new file mode 100644
index 0000000..5d15734
--- /dev/null
+++ b/test/testcases/ra.exp
@@ -0,0 +1,150 @@
+.TRY RA interface
+.INP: ra
+.INP: providers IPaddr
+
+heartbeat
+.INP: providers Dummy
+heartbeat pacemaker
+.INP: info ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+Example stateless resource agent (ocf:pacemaker:Dummy)
+
+This is a dummy OCF resource agent. It does absolutely nothing except keep track
+of whether it is running or not, and can be configured so that actions fail or
+take a long time. Its purpose is primarily for testing, and to serve as a
+template for resource agent writers.
+
+Parameters (*: required, []: default):
+
+state (string, [state-file]): State file
+ Location to store the resource state in.
+
+passwd (string): Password
+ Fake password field
+
+fake (string, [dummy]):
+ Fake attribute that can be changed to cause an agent reload
+
+op_sleep (string, [0]): Operation sleep duration in seconds.
+ Number of seconds to sleep during operations. This can be used to test how
+ the cluster reacts to operation timeouts.
+
+fail_start_on (string): Report bogus start failure on specified host
+ Start, migrate_from, and reload-agent actions will return failure if running on
+ the host specified here, but the resource will run successfully anyway (future
+ monitor calls will find it running). This can be used to test on-fail=ignore.
+
+envfile (string): Environment dump file
+ If this is set, the environment will be dumped to this file for every call.
+
+Operations' defaults (advisory minimum):
+
+ start timeout=20s
+ stop timeout=20s
+ monitor timeout=20s interval=10s depth=0
+ reload timeout=20s
+ reload-agent timeout=20s
+ migrate_to timeout=20s
+ migrate_from timeout=20s
+.INP: info stonith:external/ssh
+.EXT crm_resource --show-metadata stonith:external/ssh
+.EXT stonithd metadata
+ssh STONITH device (stonith:external/ssh)
+
+ssh-based host reset
+Fine for testing, but not suitable for production!
+Only reboot action supported, no poweroff, and, surprisingly enough, no poweron.
+
+Parameters (*: required, []: default):
+
+hostlist* (string): Hostlist
+ The list of hosts that the STONITH device controls
+
+livedangerously (enum): Live Dangerously!!
+ Set to "yes" if you want to risk your system's integrity.
+ Of course, since this plugin isn't for production, using it
+ in production at all is a bad idea. On the other hand,
+ setting this parameter to yes makes it an even worse idea.
+ Viva la Vida Loca!
+
+pcmk_host_argument (string, [port]): Advanced use only: An alternate parameter to supply instead of 'port'
+ some devices do not support the standard 'port' parameter or may provide additional ones. Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of none can be used to tell the cluster not to supply any additional parameters.
+
+pcmk_host_map (string): A mapping of host names to port numbers for devices that do not support host names.
+ Eg. node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2
+
+pcmk_host_list (string): Eg. node1,node2,node3
+ A list of machines controlled by this device (Optional unless pcmk_host_list=static-list)
+
+pcmk_host_check (string, [dynamic-list]): How to determine which machines are controlled by the device.
+ Allowed values: dynamic-list (query the device via the 'list' command), static-list (check the pcmk_host_list attribute), status (query the device via the 'status' command), none (assume every device can fence every machine)
+
+pcmk_delay_max (time, [0s]): Enable a base delay for fencing actions and specify base delay value.
+ Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
+
+pcmk_delay_base (string, [0s]): Enable a base delay for fencing actions and specify base delay value.
+ This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value per target.
+
+pcmk_action_limit (integer, [1]): The maximum number of actions that can be performed in parallel on this device
+ Cluster property concurrent-fencing=true needs to be configured first. Then use this to specify the maximum number of actions that can be performed in parallel on this device. -1 is unlimited.
+
+pcmk_reboot_action (string, [reboot]): Advanced use only: An alternate command to run instead of 'reboot'
+ Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.
+
+pcmk_reboot_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for reboot actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.
+
+pcmk_reboot_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'reboot' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'reboot' actions before giving up.
+
+pcmk_off_action (string, [off]): Advanced use only: An alternate command to run instead of 'off'
+ Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.
+
+pcmk_off_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for off actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.
+
+pcmk_off_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'off' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'off' actions before giving up.
+
+pcmk_on_action (string, [on]): Advanced use only: An alternate command to run instead of 'on'
+ Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.
+
+pcmk_on_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for on actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.
+
+pcmk_on_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'on' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'on' actions before giving up.
+
+pcmk_list_action (string, [list]): Advanced use only: An alternate command to run instead of 'list'
+ Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.
+
+pcmk_list_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for list actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.
+
+pcmk_list_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'list' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'list' actions before giving up.
+
+pcmk_monitor_action (string, [monitor]): Advanced use only: An alternate command to run instead of 'monitor'
+ Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
+
+pcmk_monitor_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for monitor actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.
+
+pcmk_monitor_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'monitor' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'monitor' actions before giving up.
+
+pcmk_status_action (string, [status]): Advanced use only: An alternate command to run instead of 'status'
+ Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.
+
+pcmk_status_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for status actions instead of stonith-timeout
+ Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.
+
+pcmk_status_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'status' command within the timeout period
+ Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'status' actions before giving up.
+
+Operations' defaults (advisory minimum):
+
+ start timeout=20
+ stop timeout=15
+ status timeout=20
+ monitor timeout=20 interval=3600
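Both `info` listings above are rendered from agent metadata fetched through the `crm_resource --show-metadata <class>[:<provider>]:<type>` calls recorded in the `.EXT` lines. A minimal sketch of that fetch (illustrative; the helper is invented):

import subprocess

def fetch_metadata(spec):
    """Return raw OCF metadata XML for e.g. 'ocf:pacemaker:Dummy'."""
    result = subprocess.run(["crm_resource", "--show-metadata", spec],
                            capture_output=True, text=True, check=True)
    return result.stdout  # the shell formats this XML into the text above

# fetch_metadata("stonith:external/ssh") corresponds to:
#   .EXT crm_resource --show-metadata stonith:external/ssh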
diff --git a/test/testcases/ra.filter b/test/testcases/ra.filter
new file mode 100755
index 0000000..bc57a83
--- /dev/null
+++ b/test/testcases/ra.filter
@@ -0,0 +1,17 @@
+#!/usr/bin/awk -f
+# reduce the providers list to heartbeat and pacemaker
+# (prevents other providers creeping in)
+function reduce(a) {
+ a["heartbeat"]=1; a["pacemaker"]=1;
+ s="";
+ for( i=1; i<=NF; i++ )
+ if( $i in a )
+ s=s" "$i;
+ return substr(s,2);
+}
+n==1 { n=0; print reduce(a); next; }
+/providers IPaddr/ { n=1; }
+/providers Dummy/ { n=1; }
+/^ssh STONITH/ { sub(" external",""); }
+/^state \(string, \[(.*)\]\):/ { gsub(/\[.*\]/, "[state-file]") }
+{ print }
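The filter keeps the providers output stable on hosts that have extra resource-agent packages installed: it reduces the line after each `providers` command to just heartbeat/pacemaker, drops " external" from the ssh STONITH title, and pins the `state` parameter's default to `[state-file]`. An equivalent sketch in Python, for readers less fluent in awk (the test suite runs the awk version):

import re
import sys

def filter_lines(lines):
    keep = {"heartbeat", "pacemaker"}
    reduce_next = False
    for line in lines:
        if reduce_next:
            # line following a "providers ..." command: keep known providers only
            reduce_next = False
            yield " ".join(w for w in line.split() if w in keep)
            continue
        if "providers IPaddr" in line or "providers Dummy" in line:
            reduce_next = True
        if line.startswith("ssh STONITH"):
            line = line.replace(" external", "", 1)
        # normalize the host-dependent default of the Dummy 'state' parameter
        line = re.sub(r"^(state \(string, )\[[^]]*\](\):)",
                      r"\g<1>[state-file]\g<2>", line)
        yield line

for out in filter_lines(sys.stdin.read().splitlines()):
    print(out)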
diff --git a/test/testcases/resource b/test/testcases/resource
new file mode 100644
index 0000000..8fad9b6
--- /dev/null
+++ b/test/testcases/resource
@@ -0,0 +1,84 @@
+resource status p0
+%setenv showobj=p3
+resource start p3
+resource stop p3
+%setenv showobj=c1
+resource manage c1
+resource unmanage c1
+%setenv showobj=p2
+resource maintenance p2 on
+resource maintenance p2 off
+%setenv showobj=cli-prefer-p3
+resource migrate p3 node1
+%setenv showobj=
+resource unmigrate p3
+%setenv showobj=cli-prefer-p3
+resource migrate p3 node1 force
+%setenv showobj=
+resource unmigrate p3
+%setenv showobj=p0
+resource param p0 set a0 "1 2 3"
+resource param p0 show a0
+resource param p0 delete a0
+resource meta p0 set m0 123
+resource meta p0 show m0
+resource meta p0 delete m0
+resource trace p0 probe
+resource trace p0 start
+resource trace p0 stop
+resource untrace p0 probe
+resource untrace p0 start
+resource untrace p0 stop
+configure group g p0 p3
+options manage-children never
+resource start g
+resource start p0
+resource stop g
+configure clone cg g
+options manage-children always
+resource start g
+resource stop g
+resource start cg
+resource stop p0
+resource start cg
+resource stop cg
+resource stop p3
+%setenv showobj=
+configure rename p3 p4
+configure primitive p3 Dummy
+resource stop p3
+resource start p3
+resource cleanup
+resource cleanup p3
+resource cleanup p3 node1
+resource cleanup force
+resource cleanup p3 force
+resource cleanup p3 node1 force
+resource refresh
+resource refresh p3
+resource refresh p3 node1
+resource refresh force
+resource refresh p3 force
+resource refresh p3 node1 force
+resource stop p3
+configure rm cg
+configure ms msg g
+resource scores
+%setenv showobj=
+configure primitive p5 Dummy
+configure group g1 p5
+resource manage p5
+%setenv showobj=p5
+-F resource maintenance p5 on
+%setenv showobj=p5
+-F resource unmanage p5
+%setenv showobj=p5
+-F resource maintenance g1
+resource start p5
+%setenv showobj=g1
+-F resource manage g1
+resource start p5
+%setenv showobj=p5
+-F resource maintenance p5 on
+%setenv showobj=g1
+-F resource maintenance g1
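This script leans on two harness conventions that explain the `.exp` file that follows: a `%setenv showobj=<id>` line asks the driver to dump `show xml <id>` after each subsequent command (an empty value clears it), and a `-F` prefix runs the command with prompts force-answered. A hedged sketch of parsing those directives (names invented; this is not the project's actual test driver):

def parse_testcase(lines):
    """Yield (command, force, showobj) for each executable line (sketch)."""
    showobj = None
    for raw in lines:
        line = raw.rstrip("\n")
        if line.startswith("%setenv "):
            _key, _, val = line[len("%setenv "):].partition("=")
            showobj = val or None   # '%setenv showobj=' clears it
            continue
        force = line.startswith("-F ")
        yield (line[3:] if force else line), force, showobj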
diff --git a/test/testcases/resource.exp b/test/testcases/resource.exp
new file mode 100644
index 0000000..c03aae7
--- /dev/null
+++ b/test/testcases/resource.exp
@@ -0,0 +1,1450 @@
+.TRY resource status p0
+.EXT crm_resource --locate --resource 'p0'
+resource p0 is NOT running
+.SETENV showobj=p3
+.TRY resource start p3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p3
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop p3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p3
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=c1
+.TRY resource manage c1
+.INP: configure
+.INP: _regtest on
+.INP: show xml c1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="c1">
+ <meta_attributes id="c1-meta_attributes">
+ <nvpair id="c1-meta_attributes-interleave" name="interleave" value="true"/>
+ <nvpair id="c1-meta_attributes-is-managed" name="is-managed" value="true"/>
+ </meta_attributes>
+ <primitive id="p1" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p1-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p1-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p1-stop-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource unmanage c1
+.INP: configure
+.INP: _regtest on
+.INP: show xml c1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="c1">
+ <meta_attributes id="c1-meta_attributes">
+ <nvpair id="c1-meta_attributes-interleave" name="interleave" value="true"/>
+ <nvpair id="c1-meta_attributes-is-managed" name="is-managed" value="false"/>
+ </meta_attributes>
+ <primitive id="p1" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p1-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p1-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p1-stop-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=p2
+.TRY resource maintenance p2 on
+.INP: configure
+.INP: _regtest on
+.INP: show xml p2
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="m1">
+ <meta_attributes id="m1-meta_attributes">
+ <nvpair name="promotable" value="true" id="m1-meta_attributes-promotable"/>
+ <nvpair id="m1-meta_attributes-interleave" name="interleave" value="true"/>
+ <nvpair id="m1-meta_attributes-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ <primitive id="p2" class="ocf" provider="heartbeat" type="Delay">
+ <instance_attributes id="p2-instance_attributes">
+ <nvpair name="startdelay" value="2" id="p2-instance_attributes-startdelay"/>
+ <nvpair name="mondelay" value="2" id="p2-instance_attributes-mondelay"/>
+ <nvpair name="stopdelay" value="2" id="p2-instance_attributes-stopdelay"/>
+ </instance_attributes>
+ <operations>
+ <op name="monitor" timeout="30s" interval="10s" id="p2-monitor-10s"/>
+ <op name="start" timeout="30s" interval="0s" id="p2-start-0s"/>
+ <op name="stop" timeout="30s" interval="0s" id="p2-stop-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource maintenance p2 off
+.INP: configure
+.INP: _regtest on
+.INP: show xml p2
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="m1">
+ <meta_attributes id="m1-meta_attributes">
+ <nvpair name="promotable" value="true" id="m1-meta_attributes-promotable"/>
+ <nvpair id="m1-meta_attributes-interleave" name="interleave" value="true"/>
+ <nvpair id="m1-meta_attributes-maintenance" name="maintenance" value="false"/>
+ </meta_attributes>
+ <primitive id="p2" class="ocf" provider="heartbeat" type="Delay">
+ <instance_attributes id="p2-instance_attributes">
+ <nvpair name="startdelay" value="2" id="p2-instance_attributes-startdelay"/>
+ <nvpair name="mondelay" value="2" id="p2-instance_attributes-mondelay"/>
+ <nvpair name="stopdelay" value="2" id="p2-instance_attributes-stopdelay"/>
+ </instance_attributes>
+ <operations>
+ <op name="monitor" timeout="30s" interval="10s" id="p2-monitor-10s"/>
+ <op name="start" timeout="30s" interval="0s" id="p2-start-0s"/>
+ <op name="stop" timeout="30s" interval="0s" id="p2-stop-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=cli-prefer-p3
+.TRY resource migrate p3 node1
+WARNING: This command 'migrate' is deprecated, please use 'move'
+INFO: "migrate" is accepted as "move"
+.EXT crm_resource --quiet --move --resource 'p3' --node 'node1'
+INFO: Move constraint created for p3 to node1
+INFO: Use `crm resource clear p3` to remove this constraint
+.INP: configure
+.INP: _regtest on
+.INP: show xml cli-prefer-p3
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources/>
+ <constraints>
+ <rsc_location id="cli-prefer-p3" rsc="p3" role="Started" node="node1" score="INFINITY"/>
+ </constraints>
+ </configuration>
+</cib>
+
+.SETENV showobj=
+.TRY resource unmigrate p3
+WARNING: This command 'unmigrate' is deprecated, please use 'clear'
+INFO: "unmigrate" is accepted as "clear"
+.EXT crm_resource --quiet --clear --resource 'p3'
+INFO: Removed migration constraints for p3
+.SETENV showobj=cli-prefer-p3
+.TRY resource migrate p3 node1 force
+WARNING: This command 'migrate' is deprecated, please use 'move'
+INFO: "migrate" is accepted as "move"
+.EXT crm_resource --quiet --move --resource 'p3' --node 'node1' --force
+INFO: Move constraint created for p3 to node1
+INFO: Use `crm resource clear p3` to remove this constraint
+.INP: configure
+.INP: _regtest on
+.INP: show xml cli-prefer-p3
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources/>
+ <constraints>
+ <rsc_location id="cli-prefer-p3" rsc="p3" role="Started" node="node1" score="INFINITY"/>
+ </constraints>
+ </configuration>
+</cib>
+
+.SETENV showobj=
+.TRY resource unmigrate p3
+WARNING: This command 'unmigrate' is deprecated, please use 'clear'
+INFO: "unmigrate" is accepted as "clear"
+.EXT crm_resource --quiet --clear --resource 'p3'
+INFO: Removed migration constraints for p3
+.SETENV showobj=p0
+.TRY resource param p0 set a0 "1 2 3"
+.EXT crm_resource --resource 'p0' --set-parameter 'a0' --parameter-value '1 2 3'
+Set 'p0' option: id=p0-instance_attributes-a0 set=p0-instance_attributes name=a0 value=1 2 3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes">
+ <nvpair id="p0-instance_attributes-a0" name="a0" value="1 2 3"/>
+ </instance_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource param p0 show a0
+.EXT crm_resource --resource 'p0' --get-parameter 'a0'
+1 2 3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes">
+ <nvpair id="p0-instance_attributes-a0" name="a0" value="1 2 3"/>
+ </instance_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource param p0 delete a0
+.EXT crm_resource --resource 'p0' --delete-parameter 'a0'
+Deleted 'p0' option: id=p0-instance_attributes-a0 name=a0
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource meta p0 set m0 123
+.EXT crm_resource --meta --resource 'p0' --set-parameter 'm0' --parameter-value '123'
+Set 'p0' option: id=p0-meta_attributes-m0 set=p0-meta_attributes name=m0 value=123
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-m0" name="m0" value="123"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource meta p0 show m0
+.EXT crm_resource --meta --resource 'p0' --get-parameter 'm0'
+123
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-m0" name="m0" value="123"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource meta p0 delete m0
+.EXT crm_resource --meta --resource 'p0' --delete-parameter 'm0'
+Deleted 'p0' option: id=p0-meta_attributes-m0 name=m0
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource trace p0 probe
+INFO: Trace for p0:monitor is written to /var/lib/heartbeat/trace_ra/Dummy
+INFO: Trace set, restart p0 to trace non-monitor operations
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ <op name="monitor" interval="0" id="p0-monitor-0">
+ <instance_attributes id="p0-monitor-0-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-monitor-0-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource trace p0 start
+INFO: Trace for p0:start is written to /var/lib/heartbeat/trace_ra/Dummy
+INFO: Trace set, restart p0 to trace the start operation
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ <op name="monitor" interval="0" id="p0-monitor-0">
+ <instance_attributes id="p0-monitor-0-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-monitor-0-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s">
+ <instance_attributes id="p0-start-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-start-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource trace p0 stop
+INFO: Trace for p0:stop is written to /var/lib/heartbeat/trace_ra/Dummy
+INFO: Trace set, restart p0 to trace the stop operation
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="monitor" interval="0" id="p0-monitor-0">
+ <instance_attributes id="p0-monitor-0-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-monitor-0-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s">
+ <instance_attributes id="p0-start-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-start-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s">
+ <instance_attributes id="p0-stop-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-stop-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource untrace p0 probe
+INFO: Stop tracing p0 for operation monitor
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s">
+ <instance_attributes id="p0-start-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-start-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s">
+ <instance_attributes id="p0-stop-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-stop-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource untrace p0 start
+INFO: Stop tracing p0 for operation start
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s">
+ <instance_attributes id="p0-stop-0s-instance_attributes">
+ <nvpair name="trace_ra" value="1" id="p0-stop-0s-instance_attributes-trace_ra"/>
+ </instance_attributes>
+ </op>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource untrace p0 stop
+INFO: Stop tracing p0 for operation stop
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
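The trace/untrace exchanges above work by toggling a `trace_ra=1` instance attribute on the targeted `<op>` element; an agent run with `trace_ra` set writes a shell trace under `/var/lib/heartbeat/trace_ra/`, matching the INFO messages. A sketch of that XML edit (illustrative, using lxml; not crmsh's actual code):

from lxml import etree

def set_op_trace(op, enable=True):
    """Toggle trace_ra=1 on an <op> element (sketch, not crmsh's code)."""
    ia = op.find("instance_attributes")
    if enable:
        if ia is None:
            ia = etree.SubElement(op, "instance_attributes",
                                  id=op.get("id") + "-instance_attributes")
        # produces ids like p0-start-0s-instance_attributes-trace_ra, as above
        etree.SubElement(ia, "nvpair", id=ia.get("id") + "-trace_ra",
                         name="trace_ra", value="1")
    elif ia is not None:
        op.remove(ia)   # untrace drops the whole instance_attributes set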
+.TRY configure group g p0 p3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY options manage-children never
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <instance_attributes id="p0-instance_attributes"/>
+ <meta_attributes id="p0-meta_attributes"/>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start g
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g">
+ <meta_attributes id="g-meta_attributes">
+ <nvpair id="g-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start p0
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g">
+ <meta_attributes id="g-meta_attributes">
+ <nvpair id="g-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop g
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g">
+ <meta_attributes id="g-meta_attributes">
+ <nvpair id="g-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY configure clone cg g
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <group id="g">
+ <meta_attributes id="g-meta_attributes">
+ <nvpair id="g-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY options manage-children always
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <group id="g">
+ <meta_attributes id="g-meta_attributes">
+ <nvpair id="g-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start g
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop g
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start cg
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop p0
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ <meta_attributes id="p0-meta_attributes">
+ <nvpair id="p0-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start cg
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop cg
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource stop p3
+.INP: configure
+.INP: _regtest on
+.INP: show xml p0
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <clone id="cg">
+ <meta_attributes id="cg-meta_attributes">
+ <nvpair id="cg-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <group id="g">
+ <primitive id="p0" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p0-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p0-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p0-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="p3" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p3-stop-0s"/>
+ </operations>
+ <meta_attributes id="p3-meta_attributes">
+ <nvpair id="p3-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=
+.TRY configure rename p3 p4
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.TRY configure primitive p3 Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.TRY resource stop p3
+.TRY resource start p3
+.TRY resource cleanup
+.EXT crm_resource --cleanup
+.TRY resource cleanup p3
+.EXT crm_resource --cleanup --resource p3
+.TRY resource cleanup p3 node1
+.EXT crm_resource --cleanup --resource p3 --node node1
+.TRY resource cleanup force
+.EXT crm_resource --cleanup --force
+.TRY resource cleanup p3 force
+.EXT crm_resource --cleanup --resource p3 --force
+.TRY resource cleanup p3 node1 force
+.EXT crm_resource --cleanup --resource p3 --node node1 --force
+.TRY resource refresh
+.EXT crm_resource --refresh
+.TRY resource refresh p3
+.EXT crm_resource --refresh --resource p3
+.TRY resource refresh p3 node1
+.EXT crm_resource --refresh --resource p3 --node node1
+.TRY resource refresh force
+.EXT crm_resource --refresh --force
+.TRY resource refresh p3 force
+.EXT crm_resource --refresh --resource p3 --force
+.TRY resource refresh p3 node1 force
+.EXT crm_resource --refresh --resource p3 --node node1 --force
+.TRY resource stop p3
+.TRY configure rm cg
+WARNING: This command 'rm' is deprecated, please use 'delete'
+INFO: "rm" is accepted as "delete"
+.TRY configure ms msg g
+WARNING: "ms" is deprecated. Please use "clone msg g meta promotable=true"
+.TRY resource scores
+.EXT crm_simulate -sUL
+2 of 6 resource instances DISABLED and 0 BLOCKED from further action due to failure
+
+Node node1: UNCLEAN (offline)
+
+ st (stonith:null): Stopped
+ Stopped: [ node1 ]
+ Stopped: [ node1 ]
+ p3 (ocf::heartbeat:Dummy): Stopped ( disabled )
+ Stopped: [ node1 ]
+
+Original: node1 capacity:
+pcmk__primitive_assign: st allocation score on node1: 0
+pcmk__clone_assign: c1 allocation score on node1: 0
+pcmk__clone_assign: p1:0 allocation score on node1: 0
+pcmk__primitive_assign: p1:0 allocation score on node1: -INFINITY
+pcmk__clone_assign: m1 allocation score on node1: 0
+pcmk__clone_assign: p2:0 allocation score on node1: 0
+pcmk__primitive_assign: p2:0 allocation score on node1: -INFINITY
+p2:0 promotion score on none: 0
+pcmk__primitive_assign: p3 allocation score on node1: -INFINITY
+pcmk__clone_assign: msg allocation score on node1: 0
+pcmk__clone_assign: g:0 allocation score on node1: 0
+pcmk__clone_assign: p0:0 allocation score on node1: 0
+pcmk__clone_assign: p4:0 allocation score on node1: 0
+pcmk__group_assign: g:0 allocation score on node1: -INFINITY
+pcmk__group_assign: p0:0 allocation score on node1: -INFINITY
+pcmk__group_assign: p4:0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: p0:0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: p4:0 allocation score on node1: -INFINITY
+g:0 promotion score on none: 0
+Remaining: node1 capacity:
+
+.SETENV showobj=
+.TRY configure primitive p5 Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.EXT crm_resource --show-metadata stonith:null
+.EXT stonithd metadata
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Delay
+.TRY configure group g1 p5
+.TRY resource manage p5
+.SETENV showobj=p5
+.TRY -F resource maintenance p5 on
+INFO: 'maintenance' conflicts with 'is-managed' attribute. Remove 'is-managed' for resource p5? [YES]
+.INP: configure
+.INP: _regtest on
+.INP: show xml p5
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes-0">
+ <nvpair id="p5-meta_attributes-0-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=p5
+.TRY -F resource unmanage p5
+INFO: 'is-managed' conflicts with 'maintenance' attribute. Remove 'maintenance' for resource p5? [YES]
+.INP: configure
+.INP: _regtest on
+.INP: show xml p5
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-is-managed" name="is-managed" value="false"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=p5
+.TRY -F resource maintenance g1
+.INP: configure
+.INP: _regtest on
+.INP: show xml p5
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes">
+ <nvpair id="g1-meta_attributes-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start p5
+.INP: configure
+.INP: _regtest on
+.INP: show xml p5
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes">
+ <nvpair id="g1-meta_attributes-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=g1
+.TRY -F resource manage g1
+INFO: 'is-managed' conflicts with 'maintenance' attribute. Remove 'maintenance' for resource g1? [YES]
+.INP: configure
+.INP: _regtest on
+.INP: show xml g1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes-0">
+ <nvpair id="g1-meta_attributes-0-is-managed" name="is-managed" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.TRY resource start p5
+.INP: configure
+.INP: _regtest on
+.INP: show xml g1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes-0">
+ <nvpair id="g1-meta_attributes-0-is-managed" name="is-managed" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=p5
+.TRY -F resource maintenance p5 on
+.INP: configure
+.INP: _regtest on
+.INP: show xml p5
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes-0">
+ <nvpair id="g1-meta_attributes-0-is-managed" name="is-managed" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-target-role" name="target-role" value="Started"/>
+ <nvpair id="p5-meta_attributes-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
+.SETENV showobj=g1
+.TRY -F resource maintenance g1
+INFO: 'maintenance' conflicts with 'is-managed' attribute. Remove 'is-managed' for resource g1? [YES]
+.INP: configure
+.INP: _regtest on
+.INP: show xml g1
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources>
+ <group id="g1">
+ <meta_attributes id="g1-meta_attributes">
+ <nvpair id="g1-meta_attributes-maintenance" name="maintenance" value="true"/>
+ </meta_attributes>
+ <primitive id="p5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="p5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="p5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="p5-stop-0s"/>
+ </operations>
+ <meta_attributes id="p5-meta_attributes">
+ <nvpair id="p5-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+ <constraints/>
+ </configuration>
+</cib>
+
diff --git a/test/testcases/rset b/test/testcases/rset
new file mode 100644
index 0000000..798e392
--- /dev/null
+++ b/test/testcases/rset
@@ -0,0 +1,21 @@
+show Resource sets
+node node1
+primitive st stonith:ssh \
+ params hostlist='node1' \
+ op start timeout=60s
+primitive d1 ocf:pacemaker:Dummy
+primitive d2 ocf:heartbeat:Dummy
+primitive d3 ocf:heartbeat:Dummy
+primitive d4 ocf:heartbeat:Dummy
+primitive d5 ocf:heartbeat:Dummy
+order o1 Serialize: d1 d2 ( d3 d4 )
+colocation c1 inf: d4 ( d1 d2 d3 )
+colocation c2 inf: d1 d2 d3 d4
+colocation c3 inf: ( d3 d4 ) ( d1 d2 )
+delete d2
+show o1 c1 c2 c3
+delete d4
+show o1 c1 c2 c3
+_test
+verify
+.
diff --git a/test/testcases/rset-xml b/test/testcases/rset-xml
new file mode 100644
index 0000000..842d4df
--- /dev/null
+++ b/test/testcases/rset-xml
@@ -0,0 +1,19 @@
+showxml Resource sets
+node node1
+primitive st stonith:ssh \
+ params hostlist='node1' \
+ op start timeout=60s
+primitive d1 ocf:pacemaker:Dummy
+primitive d2 ocf:heartbeat:Dummy
+primitive d3 ocf:heartbeat:Dummy
+primitive d4 ocf:heartbeat:Dummy
+primitive d5 ocf:heartbeat:Dummy
+order o1 Serialize: d1 d2 ( d3 d4 )
+colocation c1 inf: d4 ( d1 d2 d3 )
+colocation c2 inf: d1 d2 d3 d4
+colocation c3 inf: ( d3 d4 ) ( d1 d2 )
+delete d2
+delete d4
+_test
+verify
+.
diff --git a/test/testcases/rset-xml.exp b/test/testcases/rset-xml.exp
new file mode 100644
index 0000000..51c431a
--- /dev/null
+++ b/test/testcases/rset-xml.exp
@@ -0,0 +1,53 @@
+<?xml version="1.0" ?>
+<cib>
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node uname="node1" id="node1"/>
+ </nodes>
+ <resources>
+ <primitive id="st" class="stonith" type="ssh">
+ <instance_attributes id="st-instance_attributes">
+ <nvpair name="hostlist" value="node1" id="st-instance_attributes-hostlist"/>
+ </instance_attributes>
+ <operations>
+ <op name="start" timeout="60s" interval="0s" id="st-start-0s"/>
+ <op name="monitor" timeout="20" interval="3600" id="st-monitor-3600"/>
+ <op name="stop" timeout="15" interval="0s" id="st-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="d1" class="ocf" provider="pacemaker" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="d1-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="d1-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d1-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="d3" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="d3-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="d3-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d3-stop-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="d5" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op name="monitor" timeout="20s" interval="10s" id="d5-monitor-10s"/>
+ <op name="start" timeout="20s" interval="0s" id="d5-start-0s"/>
+ <op name="stop" timeout="20s" interval="0s" id="d5-stop-0s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_order id="o1" kind="Serialize" first="d1" then="d3"/>
+ <rsc_colocation id="c1" score="INFINITY">
+ <resource_set sequential="false" id="c1-1">
+ <resource_ref id="d1"/>
+ <resource_ref id="d3"/>
+ </resource_set>
+ </rsc_colocation>
+ <rsc_colocation id="c2" score="INFINITY" rsc="d3" with-rsc="d1"/>
+ <rsc_colocation id="c3" score="INFINITY" rsc="d3" with-rsc="d1"/>
+ </constraints>
+ </configuration>
+</cib>
diff --git a/test/testcases/rset.exp b/test/testcases/rset.exp
new file mode 100644
index 0000000..79b03f4
--- /dev/null
+++ b/test/testcases/rset.exp
@@ -0,0 +1,66 @@
+.TRY Resource sets
+.INP: configure
+.INP: _regtest on
+.INP: erase
+.INP: erase nodes
+.INP: node node1
+.INP: primitive st stonith:ssh params hostlist='node1' op start timeout=60s
+.EXT crm_resource --show-metadata stonith:ssh
+.EXT stonithd metadata
+.INP: primitive d1 ocf:pacemaker:Dummy
+.EXT crm_resource --show-metadata ocf:pacemaker:Dummy
+.INP: primitive d2 ocf:heartbeat:Dummy
+.EXT crm_resource --show-metadata ocf:heartbeat:Dummy
+.INP: primitive d3 ocf:heartbeat:Dummy
+.INP: primitive d4 ocf:heartbeat:Dummy
+.INP: primitive d5 ocf:heartbeat:Dummy
+.INP: order o1 Serialize: d1 d2 ( d3 d4 )
+.INP: colocation c1 inf: d4 ( d1 d2 d3 )
+.INP: colocation c2 inf: d1 d2 d3 d4
+.INP: colocation c3 inf: ( d3 d4 ) ( d1 d2 )
+.INP: delete d2
+INFO: 16: constraint order:o1 updated
+INFO: 16: constraint colocation:c1 updated
+INFO: 16: constraint colocation:c2 updated
+INFO: 16: constraint colocation:c3 updated
+.INP: show o1 c1 c2 c3
+colocation c1 inf: d4 ( d1 d3 )
+colocation c2 inf: d1 d3 d4
+colocation c3 inf: ( d3 d4 ) ( d1 )
+order o1 Serialize: d1 ( d3 d4 )
+.INP: delete d4
+INFO: 18: constraint order:o1 updated
+INFO: 18: constraint colocation:c1 updated
+INFO: 18: constraint colocation:c2 updated
+INFO: 18: constraint colocation:c3 updated
+.INP: show o1 c1 c2 c3
+colocation c1 inf: ( d1 d3 )
+colocation c2 inf: d3 d1
+colocation c3 inf: d3 d1
+order o1 Serialize: d1 d3
+.INP: _test
+.INP: verify
+.INP: show
+node node1
+primitive d1 ocf:pacemaker:Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d3 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive d5 Dummy \
+ op monitor timeout=20s interval=10s \
+ op start timeout=20s interval=0s \
+ op stop timeout=20s interval=0s
+primitive st stonith:ssh \
+ params hostlist=node1 \
+ op start timeout=60s interval=0s \
+ op monitor timeout=20 interval=3600 \
+ op stop timeout=15 interval=0s
+colocation c1 inf: ( d1 d3 )
+colocation c2 inf: d3 d1
+colocation c3 inf: d3 d1
+order o1 Serialize: d1 d3
+.INP: commit
diff --git a/test/testcases/scripts b/test/testcases/scripts
new file mode 100644
index 0000000..b89d75d
--- /dev/null
+++ b/test/testcases/scripts
@@ -0,0 +1,14 @@
+session Cluster scripts
+script
+list
+list all
+list names
+list names all
+list all names
+list bogus
+show mailto
+verify mailto id=foo email=test@example.com subject=hello
+run mailto id=foo email=test@example.com subject=hello nodes=node1 dry_run=true
+json '["show", "mailto"]'
+json '["verify", "mailto", {"id":"foo", "email":"test@example.com", "subject":"hello"}]'
+.
diff --git a/test/testcases/scripts.exp b/test/testcases/scripts.exp
new file mode 100644
index 0000000..ca086c9
--- /dev/null
+++ b/test/testcases/scripts.exp
@@ -0,0 +1,305 @@
+.TRY Cluster scripts
+.INP: script
+.INP: list
+.EXT crm_resource --show-metadata ocf:heartbeat:apache
+.EXT crm_resource --show-metadata ocf:heartbeat:IPaddr2
+.EXT crm_resource --show-metadata ocf:heartbeat:Filesystem
+.EXT crm_resource --show-metadata ocf:heartbeat:mysql
+.EXT crm_resource --show-metadata systemd:cryptctl-server
+.EXT crm_resource --show-metadata ocf:heartbeat:db2
+.EXT crm_resource --show-metadata ocf:heartbeat:exportfs
+.EXT crm_resource --show-metadata systemd:haproxy
+.EXT crm_resource --show-metadata ocf:heartbeat:LVM
+.EXT crm_resource --show-metadata ocf:heartbeat:MailTo
+.EXT crm_resource --show-metadata ocf:heartbeat:nginx
+.EXT crm_resource --show-metadata ocf:heartbeat:Raid1
+Basic:
+
+health Verify health and configuration
+mailto E-Mail
+virtual-ip Virtual IP
+
+Database:
+
+database MySQL/MariaDB Database
+db2 IBM DB2 Database
+db2-hadr IBM DB2 Database with HADR
+oracle Oracle Database
+
+Filesystem:
+
+clvm Cluster-aware LVM (lvmlockd)
+clvm-vg Cluster-aware LVM (auto activation)
+drbd DRBD Block Device
+filesystem File System (mount point)
+gfs2 GFS2 File System (Cloned)
+lvm-drbd LVM Group on DRBD
+ocfs2 OCFS2 File System
+raid-lvm RAID Hosting LVM
+
+NFS:
+
+exportfs NFS Exported File System
+nfsserver NFS Server
+nfsserver-lvm-drbd NFS Server on LVM and DRBD
+
+SAP:
+
+sap-as SAP ASCS Instance
+sap-ci SAP Central Instance
+sap-db SAP Database Instance
+sap-simple-stack SAP Simple Stack Instance
+sap-simple-stack-plus SAP SimpleStack+ Instance
+
+Server:
+
+apache Apache Webserver
+haproxy HAProxy
+nginx Nginx Webserver
+
+Stonith:
+
+libvirt STONITH for libvirt (kvm / Xen)
+sbd SBD, Shared storage based fencing
+vmware Fencing using vCenter / ESX Server
+
+System management:
+
+cryptctl A utility for setting up LUKS-based disk encryption
+
+.INP: list all
+Basic:
+
+health Verify health and configuration
+mailto E-Mail
+virtual-ip Virtual IP
+
+Database:
+
+database MySQL/MariaDB Database
+db2 IBM DB2 Database
+db2-hadr IBM DB2 Database with HADR
+oracle Oracle Database
+
+Filesystem:
+
+clvm Cluster-aware LVM (lvmlockd)
+clvm-vg Cluster-aware LVM (auto activation)
+drbd DRBD Block Device
+filesystem File System (mount point)
+gfs2 GFS2 File System (Cloned)
+lvm-drbd LVM Group on DRBD
+ocfs2 OCFS2 File System
+raid-lvm RAID Hosting LVM
+
+NFS:
+
+exportfs NFS Exported File System
+nfsserver NFS Server
+nfsserver-lvm-drbd NFS Server on LVM and DRBD
+
+SAP:
+
+sap-as SAP ASCS Instance
+sap-ci SAP Central Instance
+sap-db SAP Database Instance
+sap-simple-stack SAP Simple Stack Instance
+sap-simple-stack-plus SAP SimpleStack+ Instance
+
+Script:
+
+check-uptime Check uptime of nodes
+gfs2-base GFS2 File System Base (Cloned)
+lvm Controls the availability of an LVM Volume Group
+raid1 Manages Linux software RAID (MD) devices on shared storage
+sapdb SAP Database Instance
+sapinstance SAP Instance
+sbd-device Create SBD Device
+
+Server:
+
+apache Apache Webserver
+haproxy HAProxy
+nginx Nginx Webserver
+
+Stonith:
+
+libvirt STONITH for libvirt (kvm / Xen)
+sbd SBD, Shared storage based fencing
+vmware Fencing using vCenter / ESX Server
+
+System management:
+
+cryptctl A utility for setting up LUKS-based disk encryption
+
+.INP: list names
+apache
+clvm
+clvm-vg
+cryptctl
+database
+db2
+db2-hadr
+drbd
+exportfs
+filesystem
+gfs2
+haproxy
+health
+libvirt
+lvm-drbd
+mailto
+nfsserver
+nfsserver-lvm-drbd
+nginx
+ocfs2
+oracle
+raid-lvm
+sap-as
+sap-ci
+sap-db
+sap-simple-stack
+sap-simple-stack-plus
+sbd
+virtual-ip
+vmware
+.INP: list names all
+apache
+check-uptime
+clvm
+clvm-vg
+cryptctl
+database
+db2
+db2-hadr
+drbd
+exportfs
+filesystem
+gfs2
+gfs2-base
+haproxy
+health
+libvirt
+lvm
+lvm-drbd
+mailto
+nfsserver
+nfsserver-lvm-drbd
+nginx
+ocfs2
+oracle
+raid-lvm
+raid1
+sap-as
+sap-ci
+sap-db
+sap-simple-stack
+sap-simple-stack-plus
+sapdb
+sapinstance
+sbd
+sbd-device
+virtual-ip
+vmware
+.INP: list all names
+apache
+check-uptime
+clvm
+clvm-vg
+cryptctl
+database
+db2
+db2-hadr
+drbd
+exportfs
+filesystem
+gfs2
+gfs2-base
+haproxy
+health
+libvirt
+lvm
+lvm-drbd
+mailto
+nfsserver
+nfsserver-lvm-drbd
+nginx
+ocfs2
+oracle
+raid-lvm
+raid1
+sap-as
+sap-ci
+sap-db
+sap-simple-stack
+sap-simple-stack-plus
+sapdb
+sapinstance
+sbd
+sbd-device
+virtual-ip
+vmware
+.INP: list bogus
+ERROR: 7: script.list: Unexpected argument 'bogus': expected [all|names]
+Traceback (most recent call last):
+ rv = self.execute_command() is not False
+ rv = self.command_info.function(*arglist)
+ context.fatal_error("Unexpected argument '%s': expected [all|names]" % (arg))
+ raise ValueError(msg)
+ValueError: Unexpected argument 'bogus': expected [all|names]
+.INP: show mailto
+mailto (Basic)
+E-Mail
+
+Notifies recipient by e-mail in the event of a resource takeover.
+
+1. Notifies recipients by email in the event of resource takeover
+
+ id (required) (unique)
+ Identifier for the cluster resource
+ email (required)
+ Email address
+ subject
+ Subject
+
+
+.INP: verify mailto id=foo email=test@example.com subject=hello
+1. Ensure mail package is installed
+
+ mailx
+
+2. Configure cluster resources
+
+ primitive foo ocf:heartbeat:MailTo
+ email="test@example.com"
+ subject="hello"
+ op start timeout="10"
+ op stop timeout="10"
+ op monitor interval="10" timeout="10"
+
+ clone c-foo foo
+
+.INP: run mailto id=foo email=test@example.com subject=hello nodes=node1 dry_run=true
+INFO: 10: E-Mail
+INFO: 10: Nodes: node1
+** all - #!/usr/bin/env python3
+import crm_script
+import crm_init
+
+crm_init.install_packages(['mailx'])
+crm_script.exit_ok(True)
+
+INFO: 10: Ensure mail package is installed
+** localhost - temporary file <<END
+primitive foo ocf:heartbeat:MailTo email="test@example.com" subject="hello" op start timeout="10" op stop timeout="10" op monitor interval="10" timeout="10"
+clone c-foo foo
+
+END
+
+** localhost - crm --wait --no configure load update <<temporary file>>
+INFO: 10: Configure cluster resources
+.INP: json '["show", "mailto"]'
+{"category": "basic", "longdesc": "Notifies recipient by e-mail in the event of a resource takeover.", "name": "mailto", "shortdesc": "E-Mail", "steps": [{"longdesc": " This is a resource agent for MailTo. It sends email to a sysadmin\nwhenever a takeover occurs.", "parameters": [{"advanced": false, "longdesc": "", "name": "id", "required": true, "shortdesc": "Identifier for the cluster resource", "type": "resource", "unique": true}, {"advanced": false, "example": "", "longdesc": " The email address of sysadmin.", "name": "email", "required": true, "shortdesc": "Email address", "type": "email", "unique": false}, {"advanced": false, "example": "Resource Group", "longdesc": " The subject of the email.", "name": "subject", "required": false, "shortdesc": "Subject", "type": "string", "unique": false}], "required": true, "shortdesc": "Notifies recipients by email in the event of resource takeover"}]}
+.INP: json '["verify", "mailto", {"id":"foo", "email":"test@example.com", "subject":"hello"}]'
+{"longdesc": "", "name": "install", "nodes": "", "shortdesc": "Ensure mail package is installed", "text": "mailx"}
+{"longdesc": "", "name": "cib", "nodes": "", "shortdesc": "Configure cluster resources", "text": "primitive foo ocf:heartbeat:MailTo\n\temail=\"test@example.com\"\n\tsubject=\"hello\"\n\top start timeout=\"10\"\n\top stop timeout=\"10\"\n\top monitor interval=\"10\" timeout=\"10\"\n\nclone c-foo foo"}
diff --git a/test/testcases/scripts.filter b/test/testcases/scripts.filter
new file mode 100755
index 0000000..05e098a
--- /dev/null
+++ b/test/testcases/scripts.filter
@@ -0,0 +1,4 @@
+#!/usr/bin/awk -f
+# replace the unpredictable /tmp/crm-tmp-* path in "configure load update" lines with <<temporary file>>
+/\*\* localhost - crm --wait --no configure load update (\/tmp\/crm-tmp-.+)/ { gsub(/.*/, "<<temporary file>>", $NF) }
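+# print every record (the rule above only rewrites its last field in place)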
+{ print }
diff --git a/test/testcases/shadow b/test/testcases/shadow
new file mode 100644
index 0000000..3bfd389
--- /dev/null
+++ b/test/testcases/shadow
@@ -0,0 +1,10 @@
+filesession Shadow CIB management
+cib
+new regtest force
+reset regtest
+use regtest
+commit regtest
+delete regtest
+use
+delete regtest
+.
diff --git a/test/testcases/shadow.exp b/test/testcases/shadow.exp
new file mode 100644
index 0000000..f5ec084
--- /dev/null
+++ b/test/testcases/shadow.exp
@@ -0,0 +1,24 @@
+.TRY Shadow CIB management
+.INP: cib
+.INP: new regtest force
+.EXT >/dev/null </dev/null crm_shadow -b -c 'regtest' --force
+INFO: 2: cib.new: regtest shadow CIB created
+.INP: reset regtest
+.EXT >/dev/null </dev/null crm_shadow -b -r 'regtest'
+INFO: 3: cib.reset: copied live CIB to regtest
+.INP: use regtest
+.INP: commit regtest
+.EXT >/dev/null </dev/null crm_shadow -b -C 'regtest' --force
+INFO: 5: cib.commit: committed 'regtest' shadow CIB to the cluster
+.INP: delete regtest
+ERROR: 6: cib.delete: regtest shadow CIB is in use
+Traceback (most recent call last):
+ rv = self.execute_command() is not False
+ rv = self.command_info.function(*arglist)
+ context.fatal_error("%s shadow CIB is in use" % name)
+ raise ValueError(msg)
+ValueError: regtest shadow CIB is in use
+.INP: use
+.INP: delete regtest
+.EXT >/dev/null </dev/null crm_shadow -b -D 'regtest' --force
+INFO: 8: cib.delete: regtest shadow CIB deleted
diff --git a/test/testcases/xmlonly.sh b/test/testcases/xmlonly.sh
new file mode 100755
index 0000000..15e6427
--- /dev/null
+++ b/test/testcases/xmlonly.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+#
+# extract the XML CIB: print only the lines from "<?xml" through "</cib>"
+#
+sed -n '/^<?xml/,/^<\/cib>/p'
diff --git a/test/unittests/__init__.py b/test/unittests/__init__.py
new file mode 100644
index 0000000..18f2638
--- /dev/null
+++ b/test/unittests/__init__.py
@@ -0,0 +1,64 @@
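+# Shared unit-test setup: enable debug/regression mode, point crmsh's paths
+# at the source tree, and preload an in-memory CIB for the cibconfig factory.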
+from __future__ import unicode_literals
+import os
+import sys
+
+try:
+ import crmsh
+except ImportError:
+    pass
+
+from crmsh import config
+from crmsh import options
+config.core.debug = True
+options.regression_tests = True
+_here = os.path.dirname(__file__)
+config.path.sharedir = os.path.join(_here, "../../doc")
+config.path.crm_dtd_dir = os.path.join(_here, "schemas")
+
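+# CIB_file makes the CIB tooling read from a file instead of a live cluster;
+# "test" is a dummy value, since these tests never load a real CIB file.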
+os.environ["CIB_file"] = "test"
+
+
+# install a basic CIB
+from crmsh import cibconfig
+
+_CIB = """
+<cib epoch="0" num_updates="0" admin_epoch="0" validate-with="pacemaker-1.2" cib-last-written="Mon Mar 3 23:58:36 2014" update-origin="ha-one" update-client="crmd" update-user="hacluster" crm_feature_set="3.0.9" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair name="stonith-enabled" value="false" id="cib-bootstrap-options-stonith-enabled"/>
+ <nvpair name="no-quorum-policy" value="ignore" id="cib-bootstrap-options-no-quorum-policy"/>
+ <nvpair name="dc-version" value="1.1.11+git20140221.0b7d85a-115.1-1.1.11+git20140221.0b7d85a" id="cib-bootstrap-options-dc-version"/>
+ <nvpair name="cluster-infrastructure" value="corosync" id="cib-bootstrap-options-cluster-infrastructure"/>
+ <nvpair name="symmetric-cluster" value="true" id="cib-bootstrap-options-symmetric-cluster"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="ha-one" uname="ha-one"/>
+ <node id="ha-two" uname="ha-two"/>
+ <node id="ha-three" uname="ha-three"/>
+ </nodes>
+ <resources>
+ </resources>
+ <constraints>
+ </constraints>
+ <rsc_defaults>
+ <meta_attributes id="rsc-options">
+ <nvpair name="resource-stickiness" value="1" id="rsc-options-resource-stickiness"/>
+ <nvpair name="migration-threshold" value="0" id="rsc-options-migration-threshold"/>
+ </meta_attributes>
+ </rsc_defaults>
+ <op_defaults>
+ <meta_attributes id="op-options">
+ <nvpair name="timeout" value="200" id="op-options-timeout"/>
+ <nvpair name="record-pending" value="true" id="op-options-record-pending"/>
+ </meta_attributes>
+ </op_defaults>
+ </configuration>
+ <status>
+ </status>
+</cib>
+"""
+
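+# Prime the cib_factory singleton with the canned CIB above so every test
+# starts from the same three-node configuration.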
+cibconfig.cib_factory.initialize(cib=_CIB)
+
diff --git a/test/unittests/bug-862577_corosync.conf b/test/unittests/bug-862577_corosync.conf
new file mode 100644
index 0000000..09b1225
--- /dev/null
+++ b/test/unittests/bug-862577_corosync.conf
@@ -0,0 +1,51 @@
+# Please read the corosync.conf.5 manual page
+
+service {
+ ver: 1
+ name: pacemaker
+}
+totem {
+ version: 2
+ secauth: off
+ cluster_name: hacluster
+ clear_node_high_bit: yes
+
+# Following are old corosync 1.4.x defaults from SLES
+# token: 5000
+# token_retransmits_before_loss_const: 10
+# join: 60
+# consensus: 6000
+# vsftype: none
+# max_messages: 20
+# threads: 0
+
+ crypto_cipher: none
+ crypto_hash: none
+
+ interface {
+ ringnumber: 0
+ bindnetaddr: 10.122.2.13
+ mcastaddr: 239.91.185.71
+ mcastport: 5405
+ ttl: 1
+ }
+}
+logging {
+ fileline: off
+ to_stderr: no
+ to_logfile: yes
+ logfile: /var/log/cluster/corosync.log
+ to_syslog: yes
+ debug: off
+ timestamp: on
+ logger_subsys {
+ subsys: QUORUM
+ debug: off
+ }
+}
+quorum {
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ provider: corosync_votequorum
+ expected_votes: 2
+}
diff --git a/test/unittests/corosync.conf.1 b/test/unittests/corosync.conf.1
new file mode 100644
index 0000000..7b3abed
--- /dev/null
+++ b/test/unittests/corosync.conf.1
@@ -0,0 +1,81 @@
+# Please read the corosync.conf.5 manual page
+totem {
+ version: 2
+
+ # crypto_cipher and crypto_hash: Used for mutual node authentication.
+ # If you choose to enable this, then do remember to create a shared
+ # secret with "corosync-keygen".
+	# enabling crypto_cipher also requires enabling crypto_hash.
+ crypto_cipher: none
+ crypto_hash: none
+
+ # interface: define at least one interface to communicate
+ # over. If you define more than one interface stanza, you must
+ # also set rrp_mode.
+ interface {
+ # Rings must be consecutively numbered, starting at 0.
+ ringnumber: 0
+ # This is normally the *network* address of the
+ # interface to bind to. This ensures that you can use
+ # identical instances of this configuration file
+ # across all your cluster nodes, without having to
+ # modify this option.
+ bindnetaddr: 192.168.1.0
+ # However, if you have multiple physical network
+ # interfaces configured for the same subnet, then the
+ # network address alone is not sufficient to identify
+ # the interface Corosync should bind to. In that case,
+ # configure the *host* address of the interface
+ # instead:
+ # bindnetaddr: 192.168.1.1
+ # When selecting a multicast address, consider RFC
+ # 2365 (which, among other things, specifies that
+ # 239.255.x.x addresses are left to the discretion of
+ # the network administrator). Do not reuse multicast
+ # addresses across multiple Corosync clusters sharing
+ # the same network.
+ mcastaddr: 239.255.1.1
+ # Corosync uses the port you specify here for UDP
+ # messaging, and also the immediately preceding
+ # port. Thus if you set this to 5405, Corosync sends
+ # messages over UDP ports 5405 and 5404.
+ mcastport: 5405
+ # Time-to-live for cluster communication packets. The
+ # number of hops (routers) that this ring will allow
+ # itself to pass. Note that multicast routing must be
+ # specifically enabled on most network routers.
+ ttl: 1
+ }
+}
+
+logging {
+ # Log the source file and line where messages are being
+ # generated. When in doubt, leave off. Potentially useful for
+ # debugging.
+ fileline: off
+ # Log to standard error. When in doubt, set to no. Useful when
+ # running in the foreground (when invoking "corosync -f")
+ to_stderr: no
+ # Log to a log file. When set to "no", the "logfile" option
+ # must not be set.
+ to_logfile: yes
+ logfile: /var/log/cluster/corosync.log
+ # Log to the system log daemon. When in doubt, set to yes.
+ to_syslog: yes
+ # Log debug messages (very verbose). When in doubt, leave off.
+ debug: off
+ # Log messages with time stamps. When in doubt, set to on
+ # (unless you are only logging to syslog, where double
+ # timestamps can be annoying).
+ timestamp: on
+ logger_subsys {
+ subsys: QUORUM
+ debug: off
+ }
+}
+
+quorum {
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ #provider: corosync_votequorum
+}
diff --git a/test/unittests/corosync.conf.2 b/test/unittests/corosync.conf.2
new file mode 100644
index 0000000..0438451
--- /dev/null
+++ b/test/unittests/corosync.conf.2
@@ -0,0 +1,58 @@
+# Please read the corosync.conf.5 manual page
+totem {
+ version: 2
+
+ crypto_cipher: none
+ crypto_hash: none
+
+ interface {
+ ringnumber: 0
+ bindnetaddr: 10.16.35.0
+ mcastport: 5405
+ ttl: 1
+ }
+ transport: udpu
+}
+
+logging {
+ fileline: off
+ to_logfile: yes
+ to_syslog: yes
+ logfile: /var/log/cluster/corosync.log
+ debug: off
+ timestamp: on
+ logger_subsys {
+ subsys: QUORUM
+ debug: off
+ }
+}
+
+nodelist {
+ node {
+ ring0_addr: 10.16.35.101
+ nodeid: 1
+ }
+
+ node {
+ ring0_addr: 10.16.35.102
+ nodeid: 2
+ }
+
+ node {
+ ring0_addr: 10.16.35.103
+ }
+
+ node {
+ ring0_addr: 10.16.35.104
+ }
+
+ node {
+ ring0_addr: 10.16.35.105
+ }
+}
+
+quorum {
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ provider: corosync_votequorum
+}
diff --git a/test/unittests/corosync.conf.3 b/test/unittests/corosync.conf.3
new file mode 100644
index 0000000..2cc001f
--- /dev/null
+++ b/test/unittests/corosync.conf.3
@@ -0,0 +1,68 @@
+# Please read the corosync.conf.5 manual page
+totem {
+ version: 2
+ secauth: on
+ crypto_hash: sha1
+ crypto_cipher: aes256
+ cluster_name: hacluster
+ clear_node_high_bit: yes
+ token: 5000
+ token_retransmits_before_loss_const: 10
+ join: 60
+ consensus: 6000
+ max_messages: 20
+ interface {
+ ringnumber: 0
+ bindnetaddr: 10.67.16.0
+ mcastaddr: 239.23.255.56
+ mcastport: 5405
+ ttl: 1
+ }
+
+}
+
+logging {
+ fileline: off
+ to_stderr: no
+ to_logfile: no
+ logfile: /var/log/cluster/corosync.log
+ to_syslog: yes
+ debug: off
+ timestamp: on
+ logger_subsys {
+ subsys: QUORUM
+ debug: off
+ }
+
+}
+
+quorum {
+
+ # Enable and configure quorum subsystem (default: off)
+ # see also corosync.conf.5 and votequorum.5
+ provider: corosync_votequorum
+ expected_votes: 0
+ two_node: 0
+ device {
+ votes: 1
+ model: net
+ net {
+ tls: on
+ host: 10.10.10.3
+ port: 5403
+ algorithm: ffsplit
+ tie_breaker: lowest
+ }
+
+ }
+
+}
+
+nodelist {
+ node {
+ ring0_addr: 10.67.18.221
+ nodeid: 172167901
+ }
+
+}
+
diff --git a/test/unittests/pacemaker.log b/test/unittests/pacemaker.log
new file mode 100644
index 0000000..1da52a6
--- /dev/null
+++ b/test/unittests/pacemaker.log
@@ -0,0 +1,923 @@
+Set r/w permissions for uid=90, gid=90 on /var/log/pacemaker/pacemaker.log
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (get_cluster_type) info: Detected an active 'corosync' cluster
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (mcp_read_config) info: Reading configure for stack: corosync
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (main) notice: Starting Pacemaker 2.0.1+20190304.9e909a5bd-1.4 | build=2.0.1+20190304.9e909a5bd features: generated-manpages agent-manpages ncurses libqb-logging libqb-ipc lha-fencing systemd nagios corosync-native atomic-attrd acls cibsecrets
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (main) info: Maximum core file size is: 18446744073709551615
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (qb_ipcs_us_publish) info: server name: pacemakerd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Created entry 58f6784c-39df-4fbe-90df-d462a893c0d4/0x55b94329e2e0 for node (null)/1 (1 total)
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Node 1 has uuid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_update_peer_proc) info: cluster_connect_cpg: Node (null)[1] - corosync-cpg is now online
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (cluster_connect_quorum) notice: Quorum acquired
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Node 1 is now known as 15sp1-1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Using uid=90 and group=90 for process pacemaker-based
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1943 for process pacemaker-based
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1944 for process pacemaker-fenced
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1945 for process pacemaker-execd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Using uid=90 and group=90 for process pacemaker-attrd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1946 for process pacemaker-attrd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Using uid=90 and group=90 for process pacemaker-schedulerd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1947 for process pacemaker-schedulerd
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Using uid=90 and group=90 for process pacemaker-controld
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (start_child) info: Forked child 1948 for process pacemaker-controld
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (main) info: Starting mainloop
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (pcmk_quorum_notification) info: Quorum retained | membership=4 members=1
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_update_peer_state_iter) notice: Node 15sp1-1 state is now member | nodeid=1 previous=unknown source=pcmk_quorum_notification
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.0: node 1 joined
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.0: node 1 (15sp1-1) is member
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (get_cluster_type) info: Verifying cluster type: 'corosync'
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (get_cluster_type) info: Assuming an active 'corosync' cluster
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_cluster_connect) notice: Connecting to cluster infrastructure: corosync
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (qb_ipcs_us_publish) info: server name: lrmd
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (main) info: Starting
+Apr 03 11:01:18 15sp1-1 pacemaker-schedulerd[1947] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-schedulerd[1947] (qb_ipcs_us_publish) info: server name: pengine
+Apr 03 11:01:18 15sp1-1 pacemaker-schedulerd[1947] (main) info: Starting pacemaker-schedulerd
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_cluster_type) info: Verifying cluster type: 'corosync'
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_cluster_type) info: Assuming an active 'corosync' cluster
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (pcmk__daemon_can_write) notice: /var/lib/pacemaker/cib/cib.xml not found: No such file or directory
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (retrieveCib) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.xml (digest: /var/lib/pacemaker/cib/cib.xml.sig)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_read_and_verify) warning: Could not verify cluster configuration file /var/lib/pacemaker/cib/cib.xml: No such file or directory (2)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (readCibXmlFile) warning: Primary configuration corrupt or unusable, trying backups in /var/lib/pacemaker/cib
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (readCibXmlFile) warning: Continuing with an empty configuration.
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (validate_with_relaxng) info: Creating RNG parser context
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Created entry ce9f3668-a138-4e36-aec8-124d76e0e8b8/0x5649957b59c0 for node (null)/1 (1 total)
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 1 has uuid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_proc) info: cluster_connect_cpg: Node (null)[1] - corosync-cpg is now online
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=1 previous=unknown source=crm_update_peer_proc
+Apr 03 11:01:18 15sp1-1 pacemaker-attrd [1946] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-attrd [1946] (main) info: Starting up
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (init_cs_connection_once) info: Connection to 'corosync': established
+Apr 03 11:01:18 15sp1-1 pacemaker-controld [1948] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-controld [1948] (main) info: CRM Git Version: 2.0.1+20190304.9e909a5bd-1.4 (2.0.1+20190304.9e909a5bd)
+Apr 03 11:01:18 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_STARTUP received in state S_STARTING from crmd_init
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:18 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 1 is now known as 15sp1-1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (startCib) info: CIB Initialization completed successfully
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_cluster_connect) notice: Connecting to cluster infrastructure: corosync
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Created entry 6c579ba7-433c-4d00-88f8-a4a9534cd017/0x56042ff25eb0 for node (null)/1 (1 total)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 1 has uuid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_update_peer_proc) info: cluster_connect_cpg: Node (null)[1] - corosync-cpg is now online
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=1 previous=unknown source=crm_update_peer_proc
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (init_cs_connection_once) info: Connection to 'corosync': established
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 1 is now known as 15sp1-1
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (qb_ipcs_us_publish) info: server name: cib_ro
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (qb_ipcs_us_publish) info: server name: cib_rw
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (qb_ipcs_us_publish) info: server name: cib_shm
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_init) info: Starting pacemaker-based mainloop
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.0: node 1 joined
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.0: node 1 (15sp1-1) is member
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_read_and_verify) warning: Could not verify cluster configuration file /var/lib/pacemaker/cib/cib.xml: No such file or directory (2)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-0.raw
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.0.0 of the CIB to disk (digest: 48469f360effdb63efdbbf08822875d8)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.glQmxd (digest: /var/lib/pacemaker/cib/cib.OJiM5q)
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (main) info: CIB connection active
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (get_cluster_type) info: Verifying cluster type: 'corosync'
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (get_cluster_type) info: Assuming an active 'corosync' cluster
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_cluster_connect) notice: Connecting to cluster infrastructure: corosync
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Created entry 97d13205-d013-44ab-bd52-01d8ec4132f7/0x55995aef1580 for node (null)/1 (1 total)
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 1 has uuid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_proc) info: cluster_connect_cpg: Node (null)[1] - corosync-cpg is now online
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=1 previous=unknown source=crm_update_peer_proc
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (init_cs_connection_once) info: Connection to 'corosync': established
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 1 is now known as 15sp1-1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (main) info: Cluster connection active
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (attrd_erase_attrs) info: Clearing transient attributes from CIB | xpath=//node_state[@uname='15sp1-1']/transient_attributes
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (qb_ipcs_us_publish) info: server name: attrd
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (main) info: Accepting attribute updates
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.0: node 1 joined
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.0: node 1 (15sp1-1) is member
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (attrd_peer_update) info: Setting #attrd-protocol[15sp1-1]: (null) -> 2 from 15sp1-1
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (attrd_start_election_if_needed) info: Starting an election to determine the writer
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (election_check) info: election-attrd won by local node
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (attrd_declare_winner) notice: Recorded local node as attribute writer (was unset)
+Apr 03 11:01:19 15sp1-1 pacemaker-attrd [1946] (write_attribute) info: Processed 1 private change for #attrd-protocol, id=n/a, set=n/a
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-1']/transient_attributes to all (origin=local/attrd/2)
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (get_cluster_type) info: Verifying cluster type: 'corosync'
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (get_cluster_type) info: Assuming an active 'corosync' cluster
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_cluster_connect) notice: Connecting to cluster infrastructure: corosync
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Could not obtain a node name for corosync nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Created entry 0e996957-89f6-4cd2-af8f-271088c53399/0x558f48e15840 for node (null)/1 (1 total)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Node 1 has uuid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_update_peer_proc) info: cluster_connect_cpg: Node (null)[1] - corosync-cpg is now online
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (init_cs_connection_once) info: Connection to 'corosync': established
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-1']/transient_attributes: OK (rc=0, origin=15sp1-1/attrd/2, version=0.0.0)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Node 1 is now known as 15sp1-1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Cluster node 15sp1-1 is now in unknown state
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (cluster_connect_quorum) notice: Quorum acquired
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (setup_cib) info: Watching for stonith topology changes
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (qb_ipcs_us_publish) info: server name: stonith-ng
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (main) info: Starting pacemaker-fenced mainloop
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.0: node 1 joined
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.0: node 1 (15sp1-1) is member
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (init_cib_cache_cb) info: Updating device list from the cib: init
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (cib_devices_update) info: Updating devices to version 0.0.0
+Apr 03 11:01:19 15sp1-1 pacemaker-fenced [1944] (unpack_nodes) info: Creating a fake local node
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_ha_control) info: Connected to the cluster
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (lrmd_ipc_connect) info: Connecting to executor
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_lrm_control) info: Connection to the executor established
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_started) info: Delaying start, no membership data (0000000000100000)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (pcmk_quorum_notification) info: Quorum retained | membership=4 members=1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (crm_update_peer_state_iter) notice: Node 15sp1-1 state is now member | nodeid=1 previous=unknown source=pcmk_quorum_notification
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Cluster node 15sp1-1 is now member (was in unknown state)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 1
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Defaulting to uname -n for the local corosync node name
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_started) info: Delaying start, Config not read (0000000000000040)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.0: node 1 joined
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.0: node 1 (15sp1-1) is member
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_started) info: Delaying start, Config not read (0000000000000040)
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/2)
+Apr 03 11:01:19 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/2, version=0.0.0)
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (qb_ipcs_us_publish) info: server name: crmd
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_started) notice: The local CRM is operational
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_PENDING received in state S_STARTING from do_started
+Apr 03 11:01:19 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_STARTING -> S_PENDING | input=I_PENDING cause=C_FSA_INTERNAL origin=do_started
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Election Trigger (I_DC_TIMEOUT) just popped (20000ms)
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_log) warning: Input I_DC_TIMEOUT received in state S_PENDING from crm_timer_popped
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_PENDING -> S_ELECTION | input=I_DC_TIMEOUT cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (election_check) info: election-DC won by local node
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_ELECTION_DC received in state S_ELECTION from election_win_cb
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_ELECTION -> S_INTEGRATION | input=I_ELECTION_DC cause=C_FSA_INTERNAL origin=election_win_cb
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_te_control) info: Registering TE UUID: 305c37e8-0981-497b-a285-c430070e70ae
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (set_graph_functions) info: Setting custom graph functions
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_dc_takeover) info: Taking over DC status for this partition
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_readwrite) info: We are now in R/W mode
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_master operation for section 'all': OK (rc=0, origin=local/crmd/5, version=0.0.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section cib to all (origin=local/crmd/6)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section cib: OK (rc=0, origin=15sp1-1/crmd/6, version=0.0.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section crm_config to all (origin=local/crmd/8)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.0.0 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.1.0 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @epoch=1
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config: <cluster_property_set id="cib-bootstrap-options"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </cluster_property_set>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section crm_config: OK (rc=0, origin=15sp1-1/crmd/8, version=0.1.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section crm_config to all (origin=local/crmd/10)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.1.0 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.2.0 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @epoch=2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']: <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.1+20190304.9e909a5bd-1.4-2.0.1+20190304.9e909a5bd"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section crm_config: OK (rc=0, origin=15sp1-1/crmd/10, version=0.2.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section crm_config to all (origin=local/crmd/12)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.2.0 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.3.0 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @epoch=3
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']: <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: Making join offers based on membership 4
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-1
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-1[1] - join-1 phase none -> welcomed
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_dc_join_offer_all) info: join-1: Waiting on 1 outstanding join acks
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (update_dc) info: Set DC to 15sp1-1 (3.1.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_peer_expected) info: update_dc: Node 15sp1-1[1] - expected state is now member (was (null))
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section crm_config: OK (rc=0, origin=15sp1-1/crmd/12, version=0.3.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section crm_config to all (origin=local/crmd/14)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.3.0 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.4.0 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @epoch=4
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']: <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="hacluster"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_filter_offer: Node 15sp1-1[1] - join-1 phase welcomed -> integrated
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_INTEGRATION -> S_FINALIZE_JOIN | input=I_INTEGRATED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crmd_join_phase_log) info: join-1: 15sp1-1=integrated
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_dc_join_finalize) info: join-1: Syncing our CIB to the rest of the cluster
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section crm_config: OK (rc=0, origin=15sp1-1/crmd/14, version=0.4.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: finalize_join_for: Node 15sp1-1[1] - join-1 phase integrated -> finalized
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_ack: Node 15sp1-1[1] - join-1 phase finalized -> confirmed
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_dc_join_ack) info: join-1: Updating node state to member for 15sp1-1
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting lrm status entries for 15sp1-1 | xpath=//node_state[@uname='15sp1-1']/lrm
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Digest matched on replace from 15sp1-1: 1df7ee72464178ed9ef4d38760c5c496
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Replaced 0.4.0 with 0.4.0 from 15sp1-1
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_replace operation for section 'all': OK (rc=0, origin=15sp1-1/crmd/19, version=0.4.0)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/21)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-1']/lrm to all (origin=local/crmd/22)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/23)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.4.0 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.4.1 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=1
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/nodes: <node id="1" uname="15sp1-1"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/21, version=0.4.1)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-1']/lrm: OK (rc=0, origin=15sp1-1/crmd/22, version=0.4.1)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.4.1 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.4.2 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status: <node_state id="1" uname="15sp1-1" in_ccm="true" crmd="online" crm-debug-origin="do_lrm_query_internal"/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm id="1">
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm_resources/>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </lrm>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </node_state>
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/23, version=0.4.2)
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_FINALIZE_JOIN -> S_POLICY_ENGINE | input=I_FINALIZED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (crm_update_quorum) notice: Updating quorum status to true (call=27)
+Apr 03 11:01:40 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition -1 aborted: Peer Cancelled | source=do_te_invoke:132 complete=true
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/25)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/26)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section cib to all (origin=local/crmd/27)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/25, version=0.4.2)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.4.2 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.4.3 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=3
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=do_state_transition, @join=member, @expected=member
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/26, version=0.4.3)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.4.3 2
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.4.4 (null)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=4, @have-quorum=1, @dc-uuid=1
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section cib: OK (rc=0, origin=15sp1-1/crmd/27, version=0.4.4)
+Apr 03 11:01:40 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-1.raw
+Apr 03 11:01:41 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.1.0 of the CIB to disk (digest: da12c1ea82516c83c42bbb6af78f7c43)
+Apr 03 11:01:41 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.Q2Mefv (digest: /var/lib/pacemaker/cib/cib.lYgCoM)
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (unpack_resources) error: Resource start-up disabled since no STONITH resources have been defined
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (unpack_resources) error: Either configure some or disable STONITH with the stonith-enabled option
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (unpack_resources) error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status_fencing) info: Node 15sp1-1 is active
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 0, saving inputs in /var/lib/pacemaker/pengine/pe-input-0.bz2
+Apr 03 11:01:41 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Configuration errors found during scheduler processing, please run "crm_verify -L" to identify issues
+Apr 03 11:01:41 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:01:41 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 0 (ref=pe_calc-dc-1554260501-7) derived from /var/lib/pacemaker/pengine/pe-input-0.bz2
+Apr 03 11:01:41 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 0 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-0.bz2): Complete
+Apr 03 11:01:41 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:01:41 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:01:41 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-2.raw
+Apr 03 11:01:41 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.4.0 of the CIB to disk (digest: 41a7a4f3446765b9550c8eed97655f87)
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.k86jdq (digest: /var/lib/pacemaker/cib/cib.2VIuZJ)
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_apply_diff operation for section 'all' to all (origin=local/cibadmin/2)
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.4.4 2
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.0 (null)
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @epoch=5, @num_updates=0
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']: <nvpair name="stonith-enabled" value="false" id="cib-bootstrap-options-stonith-enabled"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']: <nvpair name="placement-strategy" value="balanced" id="cib-bootstrap-options-placement-strategy"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration: <rsc_defaults/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <meta_attributes id="rsc-options">
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair name="resource-stickiness" value="1" id="rsc-options-resource-stickiness"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair name="migration-threshold" value="3" id="rsc-options-migration-threshold"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </meta_attributes>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </rsc_defaults>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration: <op_defaults/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <meta_attributes id="op-options">
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair name="timeout" value="600" id="op-options-timeout"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair name="record-pending" value="true" id="op-options-record-pending"/>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </meta_attributes>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </op_defaults>
+Apr 03 11:01:42 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_apply_diff operation for section 'all': OK (rc=0, origin=15sp1-1/cibadmin/2, version=0.5.0)
+Apr 03 11:01:42 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 0 aborted by cib-bootstrap-options-stonith-enabled doing create stonith-enabled=false: Configuration change | cib=0.5.0 source=te_update_diff_v2:499 path=/cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options'] complete=true
+Apr 03 11:01:42 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_FSA_INTERNAL origin=abort_transition_graph
+Apr 03 11:01:43 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-3.raw
+Apr 03 11:01:43 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.5.0 of the CIB to disk (digest: 80483689fd341b672c06963bb25bdd6b)
+Apr 03 11:01:43 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.ZE4jzT (digest: /var/lib/pacemaker/cib/cib.xCbzOh)
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (unpack_config) warning: Blind faith: not fencing unseen nodes
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:01:43 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 1, saving inputs in /var/lib/pacemaker/pengine/pe-input-1.bz2
+Apr 03 11:01:43 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:01:43 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 1 (ref=pe_calc-dc-1554260503-8) derived from /var/lib/pacemaker/pengine/pe-input-1.bz2
+Apr 03 11:01:43 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 1 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-1.bz2): Complete
+Apr 03 11:01:43 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:01:43 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:01:47 15sp1-1 pacemaker-based [1943] (cib_process_ping) info: Reporting our current digest to 15sp1-1: 8d4ba34bb3113e36afd6b6bf39fb69a0 for 0.5.0 (0x5604300bf500 0)
+Apr 03 11:01:49 15sp1-1 pacemaker-controld [1948] (crm_procfs_pid_of) info: Found pacemaker-based active as process 1943
+Apr 03 11:01:49 15sp1-1 pacemaker-controld [1948] (throttle_check_thresholds) notice: High CPU load detected: 1.040000
+Apr 03 11:01:49 15sp1-1 pacemaker-controld [1948] (throttle_send_command) info: New throttle mode: 0100 (was ffffffff)
+Apr 03 11:02:19 15sp1-1 pacemaker-controld [1948] (throttle_check_thresholds) info: Moderate CPU load detected: 0.810000
+Apr 03 11:02:19 15sp1-1 pacemaker-controld [1948] (throttle_send_command) info: New throttle mode: 0010 (was 0100)
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (pcmk_quorum_notification) info: Quorum retained | membership=8 members=2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (pcmk_quorum_notification) info: Quorum retained | membership=8 members=2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Created entry 1f91ec8f-7986-4c15-be46-302b53ff3193/0x558f48ea7bf0 for node (null)/2 (2 total)
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Created entry de9bd295-1272-4db2-bdc6-6b1906ae5553/0x55b9435a50e0 for node (null)/2 (2 total)
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (pcmk_quorum_notification) info: Obtaining name for new node 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (pcmk_quorum_notification) info: Obtaining name for new node 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemaker-controld [1948] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=pcmk_quorum_notification
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/34)
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/35)
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/34, version=0.5.0)
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.0 2
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.1 (null)
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=1
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=post_cache_update
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status: <node_state id="2" in_ccm="true" crmd="offline" crm-debug-origin="post_cache_update"/>
+Apr 03 11:02:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/35, version=0.5.1)
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:30 15sp1-1 pacemakerd [1941] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=pcmk_quorum_notification
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.1: node 2 joined
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.1: node 1 (15sp1-1) is member
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.1: node 2 (<unknown>) is member
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:02:31 15sp1-1 pacemakerd [1941] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.1: node 2 joined
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.1: node 1 (15sp1-1) is member
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Created entry 82f3fa83-e1aa-4b46-99aa-91c7dda4969a/0x5649958bed90 for node (null)/2 (2 total)
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.1: node 2 (<unknown>) is member
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:02:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.1: node 2 joined
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.1: node 1 (15sp1-1) is member
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Created entry f747eb13-dfe9-4182-9b3e-00d9f416e88e/0x56042fb54140 for node (null)/2 (2 total)
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.1: node 2 (<unknown>) is member
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:02:31 15sp1-1 pacemaker-based [1943] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.1: node 2 joined
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.1: node 1 (15sp1-1) is member
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Created entry f2f954cd-386a-4993-9142-8621ae195416/0x55995aef4080 for node (null)/2 (2 total)
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.1: node 2 (<unknown>) is member
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (attrd_peer_update) info: Setting #attrd-protocol[15sp1-2]: (null) -> 2 from 15sp1-2
+Apr 03 11:02:32 15sp1-1 pacemaker-attrd [1946] (write_attribute) info: Processed 2 private changes for #attrd-protocol, id=n/a, set=n/a
+Apr 03 11:02:32 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:02:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-2']/transient_attributes: OK (rc=0, origin=15sp1-2/attrd/2, version=0.5.1)
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.1: node 2 joined
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.1: node 1 (15sp1-1) is member
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.1: node 2 (<unknown>) is member
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:02:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-2/crmd/2, version=0.5.1)
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:02:32 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Cluster node 15sp1-2 is now member
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_INTEGRATION | input=I_NODE_JOIN cause=C_HA_MESSAGE origin=route_message
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (do_dc_join_offer_one) info: join-1: Processing join_announce request from 15sp1-2 in state S_INTEGRATION
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: Making join offers based on membership 8
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-2
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-2[2] - join-1 phase none -> welcomed
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-1[1] - join-1 phase confirmed -> none
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-1
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-1[1] - join-1 phase none -> welcomed
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 1 aborted: Node join | source=do_dc_join_offer_one:267 complete=true
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 1 aborted: Peer Halt | source=do_te_invoke:139 complete=true
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_filter_offer: Node 15sp1-1[1] - join-1 phase welcomed -> integrated
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_filter_offer: Node 15sp1-2[2] - join-1 phase welcomed -> integrated
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_expected) info: do_dc_join_filter_offer: Node 15sp1-2[2] - expected state is now member (was (null))
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_INTEGRATION -> S_FINALIZE_JOIN | input=I_INTEGRATED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crmd_join_phase_log) info: join-1: 15sp1-2=integrated
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crmd_join_phase_log) info: join-1: 15sp1-1=integrated
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (do_dc_join_finalize) info: join-1: Syncing our CIB to the rest of the cluster
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Digest matched on replace from 15sp1-1: e5b55e525a867a8154545eca60a3828b
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Replaced 0.5.1 with 0.5.1 from 15sp1-1
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_replace operation for section 'all': OK (rc=0, origin=15sp1-1/crmd/37, version=0.5.1)
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: finalize_join_for: Node 15sp1-2[2] - join-1 phase integrated -> finalized
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: finalize_join_for: Node 15sp1-1[1] - join-1 phase integrated -> finalized
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/38)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/39)
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_ack: Node 15sp1-1[1] - join-1 phase finalized -> confirmed
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (do_dc_join_ack) info: join-1: Updating node state to member for 15sp1-1
+Apr 03 11:02:33 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting lrm status entries for 15sp1-1 | xpath=//node_state[@uname='15sp1-1']/lrm
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.1 2
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.2 (null)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=2
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/configuration/nodes: <node id="2" uname="15sp1-2"/>
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/38, version=0.5.2)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/39, version=0.5.2)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-1']/lrm to all (origin=local/crmd/40)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/41)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.2 2
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.3 (null)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: -- /cib/status/node_state[@id='1']/lrm[@id='1']
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=3
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-1']/lrm: OK (rc=0, origin=15sp1-1/crmd/40, version=0.5.3)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.3 2
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.4 (null)
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=4
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=do_lrm_query_internal
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status/node_state[@id='1']: <lrm id="1"/>
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm_resources/>
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </lrm>
+Apr 03 11:02:33 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/41, version=0.5.4)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-4.raw
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.5.0 of the CIB to disk (digest: bd5d2bae72ccab0f8431984061bf46bf)
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_ack: Node 15sp1-2[2] - join-1 phase finalized -> confirmed
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_dc_join_ack) info: join-1: Updating node state to member for 15sp1-2
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting lrm status entries for 15sp1-2 | xpath=//node_state[@uname='15sp1-2']/lrm
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-2']/lrm to all (origin=local/crmd/42)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/43)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-2']/lrm: OK (rc=0, origin=15sp1-1/crmd/42, version=0.5.4)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.4 2
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.5 (null)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=5
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crmd=online, @crm-debug-origin=do_lrm_query_internal, @uname=15sp1-2
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status/node_state[@id='2']: <lrm id="2"/>
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm_resources/>
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </lrm>
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/43, version=0.5.5)
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_FINALIZE_JOIN -> S_POLICY_ENGINE | input=I_FINALIZED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (crm_update_quorum) notice: Updating quorum status to true (call=48)
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 1 aborted: Peer Cancelled | source=do_te_invoke:132 complete=true
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/46)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/47)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section cib to all (origin=local/crmd/48)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/46, version=0.5.5)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.5 2
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.6 (null)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=6
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=do_state_transition
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crm-debug-origin=do_state_transition, @join=member, @expected=member
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/47, version=0.5.6)
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section cib: OK (rc=0, origin=15sp1-1/crmd/48, version=0.5.6)
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:02:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 2, saving inputs in /var/lib/pacemaker/pengine/pe-input-2.bz2
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 2 (ref=pe_calc-dc-1554260554-18) derived from /var/lib/pacemaker/pengine/pe-input-2.bz2
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 2 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-2.bz2): Complete
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:02:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:02:34 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.4bNgNm (digest: /var/lib/pacemaker/cib/cib.MRpEpc)
+Apr 03 11:02:39 15sp1-1 pacemaker-based [1943] (cib_process_ping) info: Reporting our current digest to 15sp1-1: 44ae77e9ff79c954e9d39a4b11a48f55 for 0.5.6 (0x56042ffaf6e0 0)
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (crm_update_peer_expected) info: handle_request: Node 15sp1-2[2] - expected state is now down (was member)
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (handle_shutdown_request) info: Creating shutdown request for 15sp1-2 (state=S_IDLE)
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (attrd_peer_update) info: Setting shutdown[15sp1-2]: (null) -> 1554260567 from 15sp1-1
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (write_attribute) info: Sent CIB request 4 with 1 change for shutdown (id n/a, set n/a)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/attrd/4)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.6 2
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.7 (null)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=7
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status/node_state[@id='2']: <transient_attributes id="2"/>
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <instance_attributes id="status-2">
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <nvpair id="status-2-shutdown" name="shutdown" value="1554260567"/>
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </instance_attributes>
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </transient_attributes>
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 2 aborted by transient_attributes.2 'create': Transient attribute change | cib=0.5.7 source=abort_unless_down:329 path=/cib/status/node_state[@id='2'] complete=true
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_FSA_INTERNAL origin=abort_transition_graph
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/attrd/4, version=0.5.7)
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (attrd_cib_callback) info: CIB update 4 result for shutdown: OK | rc=0
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (attrd_cib_callback) info: * shutdown[15sp1-2]=1554260567
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is shutting down
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (sched_shutdown_op) notice: Scheduling shutdown of node 15sp1-2
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (LogNodeActions) notice: * Shutdown 15sp1-2
+Apr 03 11:02:47 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 3, saving inputs in /var/lib/pacemaker/pengine/pe-input-3.bz2
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 3 (ref=pe_calc-dc-1554260567-19) derived from /var/lib/pacemaker/pengine/pe-input-3.bz2
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (te_crm_command) info: Executing crm-event (1) without waiting: do_shutdown on 15sp1-2
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 3 (Complete=1, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-3.bz2): Complete
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.2: node 2 (15sp1-2) left
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now offline
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Node 15sp1-2 is no longer a peer | DC=true old=0x4000000 new=0x0000000
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting transient_attributes status entries for 15sp1-2 | xpath=//node_state[@uname='15sp1-2']/transient_attributes
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-2']/transient_attributes to all (origin=local/crmd/51)
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: crmd_peer_down: Node 15sp1-2[2] - join-1 phase confirmed -> none
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: do_shutdown of peer 15sp1-2 is in progress | action=1
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/52)
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.2: node 1 (15sp1-1) is member
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.7 2
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.8 (null)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: -- /cib/status/node_state[@id='2']/transient_attributes[@id='2']
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=8
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-2']/transient_attributes: OK (rc=0, origin=15sp1-1/crmd/51, version=0.5.8)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.8 2
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.9 (null)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=9
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crmd=offline, @crm-debug-origin=peer_update_callback, @join=down, @expected=down
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/52, version=0.5.9)
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.2: node 2 (15sp1-2) left
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now offline
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now lost | nodeid=2 previous=member source=crm_update_peer_proc
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (attrd_peer_remove) notice: Removing all 15sp1-2 attributes for peer loss
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (crm_reap_dead_member) info: Removing node with name 15sp1-2 and id 2 from membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (reap_crm_member) notice: Purged 1 peer with id=2 and/or uname=15sp1-2 from the membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.2: node 1 (15sp1-1) is member
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.2: node 2 (15sp1-2) left
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now offline
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now lost | nodeid=2 previous=member source=crm_update_peer_proc
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (crm_reap_dead_member) info: Removing node with name 15sp1-2 and id 2 from membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (reap_crm_member) notice: Purged 1 peer with id=2 and/or uname=15sp1-2 from the membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.2: node 1 (15sp1-1) is member
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_shutdown_req) info: Peer 15sp1-2 is requesting to shut down
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.2: node 2 (15sp1-2) left
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now offline
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now lost | nodeid=2 previous=member source=crm_update_peer_proc
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (crm_reap_dead_member) info: Removing node with name 15sp1-2 and id 2 from membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (reap_crm_member) notice: Purged 1 peer with id=2 and/or uname=15sp1-2 from the membership cache
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.2: node 1 (15sp1-1) is member
+Apr 03 11:02:47 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.2: node 2 (15sp1-2) left
+Apr 03 11:02:47 15sp1-1 pacemakerd [1941] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now offline
+Apr 03 11:02:47 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.2: node 1 (15sp1-1) is member
+Apr 03 11:02:47 15sp1-1 pacemakerd [1941] (pcmk_quorum_notification) info: Quorum retained | membership=12 members=1
+Apr 03 11:02:47 15sp1-1 pacemakerd [1941] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now lost | nodeid=2 previous=member source=crm_reap_unseen_nodes
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (pcmk_quorum_notification) info: Quorum retained | membership=12 members=1
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now lost | nodeid=2 previous=member source=crm_reap_unseen_nodes
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Cluster node 15sp1-2 is now lost (was member)
+Apr 03 11:02:47 15sp1-1 pacemaker-controld [1948] (peer_update_callback) notice: do_shutdown of peer 15sp1-2 is complete | action=1
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/53)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/56)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/57)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/53, version=0.5.9)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/56, version=0.5.9)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.9 2
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.10 (null)
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=10
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=post_cache_update
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @in_ccm=false, @crm-debug-origin=post_cache_update
+Apr 03 11:02:47 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/57, version=0.5.10)
+Apr 03 11:02:49 15sp1-1 pacemaker-controld [1948] (throttle_send_command) info: New throttle mode: 0001 (was 0010)
+Apr 03 11:02:52 15sp1-1 pacemaker-based [1943] (cib_process_ping) info: Reporting our current digest to 15sp1-1: 721b7cebe807ad0faf4a6dc35780fe91 for 0.5.10 (0x560430039b10 0)
+Apr 03 11:03:19 15sp1-1 pacemaker-controld [1948] (throttle_send_command) info: New throttle mode: 0000 (was 0001)
+Apr 03 11:03:30 15sp1-1 pacemaker-controld [1948] (pcmk_quorum_notification) info: Quorum retained | membership=16 members=2
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (pcmk_quorum_notification) info: Quorum retained | membership=16 members=2
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now member | nodeid=2 previous=lost source=pcmk_quorum_notification
+Apr 03 11:03:30 15sp1-1 pacemaker-controld [1948] (crm_update_peer_state_iter) notice: Node 15sp1-2 state is now member | nodeid=2 previous=lost source=pcmk_quorum_notification
+Apr 03 11:03:30 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Cluster node 15sp1-2 is now member (was lost)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/58)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.10 2
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.11 (null)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=11
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crm-debug-origin=peer_update_callback
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/58, version=0.5.11)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/61)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/62)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/61, version=0.5.11)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.11 2
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.12 (null)
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=12
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @in_ccm=true, @crm-debug-origin=post_cache_update
+Apr 03 11:03:30 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/62, version=0.5.12)
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.3: node 2 joined
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.3: node 1 (15sp1-1) is member
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (pcmk_cpg_membership) info: Group event pacemakerd.3: node 2 (15sp1-2) is member
+Apr 03 11:03:30 15sp1-1 pacemakerd [1941] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now online
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 2 joined
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 1 (15sp1-1) is member
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Created entry 5c9c833c-faec-4e40-9451-1eca51fe31c1/0x5649958c6240 for node (null)/2 (2 total)
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 2 (<unknown>) is member
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.3: node 2 joined
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.3: node 1 (15sp1-1) is member
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Created entry 53c0909d-78ff-49b5-bf79-9ef7ceb014aa/0x56042ffb1100 for node (null)/2 (2 total)
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (pcmk_cpg_membership) info: Group event cib.3: node 2 (<unknown>) is member
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:03:31 15sp1-1 pacemaker-based [1943] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.3: node 2 joined
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.3: node 1 (15sp1-1) is member
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (corosync_node_name) info: Unable to get node name for nodeid 2
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (get_node_name) notice: Could not obtain a node name for corosync nodeid 2
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Created entry cc7d88cd-ec11-4a95-9820-ec156175b0ca/0x55995aef85e0 for node (null)/2 (2 total)
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 2 has uuid 2
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (pcmk_cpg_membership) info: Group event attrd.3: node 2 (<unknown>) is member
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_proc) info: pcmk_cpg_membership: Node (null)[2] - corosync-cpg is now online
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (crm_update_peer_state_iter) notice: Node (null) state is now member | nodeid=2 previous=unknown source=crm_update_peer_proc
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (attrd_peer_update) info: Setting #attrd-protocol[15sp1-2]: (null) -> 2 from 15sp1-2
+Apr 03 11:03:32 15sp1-1 pacemaker-attrd [1946] (write_attribute) info: Processed 2 private changes for #attrd-protocol, id=n/a, set=n/a
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (crm_get_peer) info: Node 2 is now known as 15sp1-2
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-2']/transient_attributes: OK (rc=0, origin=15sp1-2/attrd/2, version=0.5.12)
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.3: node 2 joined
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.3: node 1 (15sp1-1) is member
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (pcmk_cpg_membership) info: Group event crmd.3: node 2 (15sp1-2) is member
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (crm_update_peer_proc) info: pcmk_cpg_membership: Node 15sp1-2[2] - corosync-cpg is now online
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (peer_update_callback) info: Node 15sp1-2 is now a peer | DC=true old=0x0000000 new=0x4000000
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (te_trigger_stonith_history_sync) info: Fence history will be synchronized cluster-wide within 5 seconds
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_INTEGRATION | input=I_NODE_JOIN cause=C_FSA_INTERNAL origin=peer_update_callback
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (do_dc_join_offer_one) info: An unknown node joined - (re-)offer to any unconfirmed nodes
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: Making join offers based on membership 16
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-2
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-2[2] - join-1 phase none -> welcomed
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: Skipping 15sp1-1: already known 4
+Apr 03 11:03:32 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 3 aborted: Peer Halt | source=do_te_invoke:139 complete=true
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/63)
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.12 2
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.13 (null)
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=13
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crmd=online, @crm-debug-origin=peer_update_callback
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/63, version=0.5.13)
+Apr 03 11:03:32 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-2/crmd/2, version=0.5.13)
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (do_dc_join_offer_one) info: join-1: Processing join_announce request from 15sp1-2 in state S_INTEGRATION
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_offer_one: Node 15sp1-2[2] - join-1 phase welcomed -> none
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-2
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-2[2] - join-1 phase none -> welcomed
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-1[1] - join-1 phase confirmed -> none
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (join_make_offer) info: join-1: Sending offer to 15sp1-1
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: join_make_offer: Node 15sp1-1[1] - join-1 phase none -> welcomed
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 3 aborted: Node join | source=do_dc_join_offer_one:267 complete=true
+Apr 03 11:03:33 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_filter_offer: Node 15sp1-1[1] - join-1 phase welcomed -> integrated
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_filter_offer: Node 15sp1-2[2] - join-1 phase welcomed -> integrated
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_expected) info: do_dc_join_filter_offer: Node 15sp1-2[2] - expected state is now member (was down)
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_INTEGRATION -> S_FINALIZE_JOIN | input=I_INTEGRATED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crmd_join_phase_log) info: join-1: 15sp1-2=integrated
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crmd_join_phase_log) info: join-1: 15sp1-1=integrated
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_dc_join_finalize) info: join-1: Syncing our CIB to the rest of the cluster
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: finalize_join_for: Node 15sp1-2[2] - join-1 phase integrated -> finalized
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: finalize_join_for: Node 15sp1-1[1] - join-1 phase integrated -> finalized
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Digest matched on replace from 15sp1-1: 9cc271d2c23b97671004273302f97501
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_ack: Node 15sp1-1[1] - join-1 phase finalized -> confirmed
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_dc_join_ack) info: join-1: Updating node state to member for 15sp1-1
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting lrm status entries for 15sp1-1 | xpath=//node_state[@uname='15sp1-1']/lrm
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_replace) info: Replaced 0.5.13 with 0.5.13 from 15sp1-1
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_replace operation for section 'all': OK (rc=0, origin=15sp1-1/crmd/65, version=0.5.13)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/66)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/67)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-1']/lrm to all (origin=local/crmd/68)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/69)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/66, version=0.5.13)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/67, version=0.5.13)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.13 2
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.14 (null)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: -- /cib/status/node_state[@id='1']/lrm[@id='1']
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=14
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-1']/lrm: OK (rc=0, origin=15sp1-1/crmd/68, version=0.5.14)
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_peer_join) info: do_dc_join_ack: Node 15sp1-2[2] - join-1 phase finalized -> confirmed
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_dc_join_ack) info: join-1: Updating node state to member for 15sp1-2
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (erase_status_tag) info: Deleting lrm status entries for 15sp1-2 | xpath=//node_state[@uname='15sp1-2']/lrm
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.14 2
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.15 (null)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=15
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=do_lrm_query_internal
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status/node_state[@id='1']: <lrm id="1"/>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm_resources/>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </lrm>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/69, version=0.5.15)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_delete operation for section //node_state[@uname='15sp1-2']/lrm to all (origin=local/crmd/70)
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_FINALIZE_JOIN -> S_POLICY_ENGINE | input=I_FINALIZED cause=C_FSA_INTERNAL origin=check_join_state
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (crm_update_quorum) notice: Updating quorum status to true (call=76)
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (abort_transition_graph) info: Transition 3 aborted: Peer Cancelled | source=do_te_invoke:132 complete=true
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/71)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.15 2
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.16 (null)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: -- /cib/status/node_state[@id='2']/lrm[@id='2']
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=16
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_delete operation for section //node_state[@uname='15sp1-2']/lrm: OK (rc=0, origin=15sp1-1/crmd/70, version=0.5.16)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.16 2
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.17 (null)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=17
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crm-debug-origin=do_lrm_query_internal
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ /cib/status/node_state[@id='2']: <lrm id="2"/>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ <lrm_resources/>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: ++ </lrm>
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/71, version=0.5.17)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section nodes to all (origin=local/crmd/74)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section status to all (origin=local/crmd/75)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Forwarding cib_modify operation for section cib to all (origin=local/crmd/76)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section nodes: OK (rc=0, origin=15sp1-1/crmd/74, version=0.5.17)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: --- 0.5.17 2
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: Diff: +++ 0.5.18 (null)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib: @num_updates=18
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='1']: @crm-debug-origin=do_state_transition
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_perform_op) info: + /cib/status/node_state[@id='2']: @crm-debug-origin=do_state_transition, @join=member, @expected=member
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section status: OK (rc=0, origin=15sp1-1/crmd/75, version=0.5.18)
+Apr 03 11:03:34 15sp1-1 pacemaker-based [1943] (cib_process_request) info: Completed cib_modify operation for section cib: OK (rc=0, origin=15sp1-1/crmd/76, version=0.5.18)
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:03:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 4, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 4 (ref=pe_calc-dc-1554260614-32) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 4 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:03:35 15sp1-1 pacemaker-based [1943] (cib_file_backup) info: Archived previous version as /var/lib/pacemaker/cib/cib-5.raw
+Apr 03 11:03:35 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Wrote version 0.5.0 of the CIB to disk (digest: ca3eacfa6d368fd79cf391411a7d16de)
+Apr 03 11:03:35 15sp1-1 pacemaker-based [1943] (cib_file_write_with_digest) info: Reading cluster configuration file /var/lib/pacemaker/cib/cib.F8yvtW (digest: /var/lib/pacemaker/cib/cib.iAVwFF)
+Apr 03 11:03:39 15sp1-1 pacemaker-based [1943] (cib_process_ping) info: Reporting our current digest to 15sp1-1: 5d5b6ac1becdd43a5327925a8d1f5579 for 0.5.18 (0x56042ffb12f0 0)
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 5, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 5 (ref=pe_calc-dc-1554261514-33) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 5 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 6, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 6 (ref=pe_calc-dc-1554262414-34) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 6 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 11:48:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 7, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 7 (ref=pe_calc-dc-1554263314-35) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 7 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 11:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 12:03:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 8, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 8 (ref=pe_calc-dc-1554264214-36) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 8 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 12:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 12:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 9, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 9 (ref=pe_calc-dc-1554265114-37) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 9 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 12:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 12:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 10, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 10 (ref=pe_calc-dc-1554266014-38) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 10 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 12:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 12:48:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 11, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 11 (ref=pe_calc-dc-1554266914-39) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 11 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 12:48:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 13:03:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 12, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 12 (ref=pe_calc-dc-1554267814-40) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 12 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 13:03:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 13:18:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 13, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 13 (ref=pe_calc-dc-1554268714-41) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 13 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 13:18:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (crm_timer_popped) info: Cluster Recheck Timer (I_PE_CALC) just popped (900000ms)
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_IDLE -> S_POLICY_ENGINE | input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) info: Input has not changed since last time, not saving to disk
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-1 is online
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (determine_online_status) info: Node 15sp1-2 is online
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 14, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 14 (ref=pe_calc-dc-1554269614-42) derived from /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 14 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-4.bz2): Complete
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE from notify_crmd
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 13:37:23 15sp1-1 pacemaker-controld [1948] (handle_ping) notice: Current ping state: S_IDLE
diff --git a/test/unittests/pacemaker.log.2 b/test/unittests/pacemaker.log.2
new file mode 100644
index 0000000..bd189cc
--- /dev/null
+++ b/test/unittests/pacemaker.log.2
@@ -0,0 +1,3 @@
+Jan 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 2 joined
+Jan 03 11:03:41 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 1 (15sp1-1) is member
+Jan 03 11:03:51 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 2
diff --git a/test/unittests/pacemaker_unicode.log b/test/unittests/pacemaker_unicode.log
new file mode 100644
index 0000000..47aaa31
--- /dev/null
+++ b/test/unittests/pacemaker_unicode.log
@@ -0,0 +1,30 @@
+Set r/w permissions for uid=90, gid=90 on /var/log/pacemaker/pacemaker.log
+� ∀↱
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (crm_log_init) info: Changed active directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (get_cluster_type) info: ¶ an ⅓ 'corosync' cluster
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (mcp_read_config) info: ⚽ configure for stack: corosync
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (main) notice: → Pacemaker 2.0.1+20190304.9e909a5bd-1.4 | build=2.0.1+20190304.9e909a5bd features: generated-manpages agent-manpages ncurses libqb-logging libqb-ipc lha-fencing systemd nagios corosync-native atomic-attrd acls cibsecrets
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (main) info: Maximum core file size is: 18446744073709551615Ḽơᶉëᶆ ȋṕšᶙṁ ḍỡḽǭᵳ ʂǐť ӓṁệẗ, ĉṓɲṩḙċťᶒțûɾ ấɖḯƥĭṩčįɳġ ḝłįʈ, șếᶑ ᶁⱺ ẽḭŭŝḿꝋď ṫĕᶆᶈṓɍ ỉñḉīḑȋᵭṵńť ṷŧ ḹẩḇőꝛế éȶ đꝍꞎôꝛȇ ᵯáꞡᶇā ąⱡîɋṹẵ.
+Apr 03 11:01:18 15sp1-1 pacemakerd [1941] (qb_ipcs_us_publish) info: 你好 \xf0\x28\x8c\x28: pac我很特殊
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (qb_ipcs_us_publish) info: κόσμε
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (main) info: a a 𐀀
+Apr 03 11:01:18 15sp1-1 pacemaker-execd [1945] (main) info: \xc3\x28 a 𐀀
+Apr 03 11:01:18 15sp1-1 pacemaker-schedulerd[1947] (crm_log_init) info: � d�ectory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-sche,̆dulerd[1947] (qb_ipcs_us_publish) info: 𐀀 name: pengine
+Apr 03 11:01:18 15sp1-1 pacemaker-schedulerd[1947] (main) info: �����r-schedulerd
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (crm_log_init) info: ������ directory to /var/lib/pacemaker/cores
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_cluster_type) info: Verifying cluster type: 'corosync' ������
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (get_cluster_type) info: � � � � � � � � � � � � � � � �
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (pcmk__daemon_can_write) notice: �����������������������������
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (retrieveCib) info: Reading cluster\xa0\xa1 configuration file /var/lib/pacemaker/cib/cib.xml (﷐﷑﷒﷓﷔﷕﷖﷗﷘﷙﷚﷛﷜﷝﷞﷟﷠﷡﷢﷣﷤﷥﷦﷧﷨﷩﷪﷫﷬﷭﷮﷯digest: /var/lib/pacemaker/cib/cib.xml.sig)
+Apr 03 11:01:18 15sp1-1 pacemaker-based [1943] (cib_file_read_and_verify) warning: Could not verify cluster configuration file /var/lib/pacemaker/cib/cib.xml: No such file or directory \xF0\xA4\xAD (2)
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 1 is already processed "\xF0\xA4\xAD\xA2"."\xF0\xA4\xAD\xA2"
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (unpack_node_loop) info: Node 2 is already processed "\xF0\xA4\xAD\xA2"."\xF0\xA4\xAD\xA2"."\xF0\xA4\xAD"
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (stage6) notice: Delaying fencing operations until there are resources to manage
+Apr 03 13:33:34 15sp1-1 pacemaker-schedulerd[1947] (process_pe_message) notice: Calculated transition 14, saving inputs in /var/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) info: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE | input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_te_invoke) notice: Processing graph 14 (ref=pe_calc-dc-1554269614-42) derived from /v \xf0\x90\x8c\xb car/lib/pacemaker/pengine/pe-input-4.bz2
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (run_graph) notice: Transition 14 (Complete=0, Pending=0,åabc Fired=0, Skipped=0, \xf0\x28\x8c\xbc Incomplete=0, Source=/var/lib/pacemake \xf0\x90\x28\xbcr/pengine/pe-input-4.bz2): Complete
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_log) info: Input I_TE_SUCCESS received in state S_TRANSITION_ENGINE fro\xf8\xa1\xa1\xa1\xa1m \xf0\x28\x8c\x28notify_crmd
+Apr 03 13:33:34 15sp1-1 pacemaker-controld [1948] (do_state_transition) notice: State transition \xfc\xa1\xa1\xa1\xa1\xa1S_TRANSITION_ENGINE -> S_IDLE | input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd
+Apr 03 13:37:23 15sp1-1 pacemaker-controld [1948] (handle_ping) notice: \xfc\xa1\xa1\xa1\xa1\xa1 test_unicode
diff --git a/test/unittests/schemas/acls-1.1.rng b/test/unittests/schemas/acls-1.1.rng
new file mode 100644
index 0000000..22cc631
--- /dev/null
+++ b/test/unittests/schemas/acls-1.1.rng
@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-acls"/>
+ </start>
+
+ <define name="element-acls">
+ <element name="acls">
+ <zeroOrMore>
+ <choice>
+ <element name="acl_user">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <zeroOrMore>
+ <element name="role_ref">
+ <attribute name="id"><data type="IDREF"/></attribute>
+ </element>
+ </zeroOrMore>
+ <zeroOrMore>
+ <ref name="element-acl"/>
+ </zeroOrMore>
+ </choice>
+ </element>
+ <element name="acl_role">
+ <attribute name="id"><data type="ID"/></attribute>
+ <zeroOrMore>
+ <ref name="element-acl"/>
+ </zeroOrMore>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-acl">
+ <choice>
+ <element name="read">
+ <ref name="attribute-acl"/>
+ </element>
+ <element name="write">
+ <ref name="attribute-acl"/>
+ </element>
+ <element name="deny">
+ <ref name="attribute-acl"/>
+ </element>
+ </choice>
+ </define>
+
+ <define name="attribute-acl">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <attribute name="tag"><text/></attribute>
+ <attribute name="ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="tag"><text/></attribute>
+ <attribute name="ref"><data type="IDREF"/></attribute>
+ </group>
+ <attribute name="xpath"><text/></attribute>
+ </choice>
+ <optional>
+ <attribute name="attribute"><text/></attribute>
+ </optional>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/acls-1.2.rng b/test/unittests/schemas/acls-1.2.rng
new file mode 100644
index 0000000..22cc631
--- /dev/null
+++ b/test/unittests/schemas/acls-1.2.rng
@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-acls"/>
+ </start>
+
+ <define name="element-acls">
+ <element name="acls">
+ <zeroOrMore>
+ <choice>
+ <element name="acl_user">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <zeroOrMore>
+ <element name="role_ref">
+ <attribute name="id"><data type="IDREF"/></attribute>
+ </element>
+ </zeroOrMore>
+ <zeroOrMore>
+ <ref name="element-acl"/>
+ </zeroOrMore>
+ </choice>
+ </element>
+ <element name="acl_role">
+ <attribute name="id"><data type="ID"/></attribute>
+ <zeroOrMore>
+ <ref name="element-acl"/>
+ </zeroOrMore>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-acl">
+ <choice>
+ <element name="read">
+ <ref name="attribute-acl"/>
+ </element>
+ <element name="write">
+ <ref name="attribute-acl"/>
+ </element>
+ <element name="deny">
+ <ref name="attribute-acl"/>
+ </element>
+ </choice>
+ </define>
+
+ <define name="attribute-acl">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <attribute name="tag"><text/></attribute>
+ <attribute name="ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="tag"><text/></attribute>
+ <attribute name="ref"><data type="IDREF"/></attribute>
+ </group>
+ <attribute name="xpath"><text/></attribute>
+ </choice>
+ <optional>
+ <attribute name="attribute"><text/></attribute>
+ </optional>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/constraints-1.0.rng b/test/unittests/schemas/constraints-1.0.rng
new file mode 100644
index 0000000..5a4474a
--- /dev/null
+++ b/test/unittests/schemas/constraints-1.0.rng
@@ -0,0 +1,180 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-constraints"/>
+ </start>
+
+ <define name="element-constraints">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-location"/>
+ <ref name="element-colocation"/>
+ <ref name="element-order"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-location">
+ <element name="rsc_location">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <choice>
+ <group>
+ <externalRef href="score.rng"/>
+ <attribute name="node"><text/></attribute>
+ </group>
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </choice>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ </element>
+ </define>
+
+ <define name="element-resource-set">
+ <element name="resource_set">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="sequential"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <oneOrMore>
+ <element name="resource_ref">
+ <attribute name="id"><data type="IDREF"/></attribute>
+ </element>
+ </oneOrMore>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-colocation">
+ <element name="rsc_colocation">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="score-attribute"><text/></attribute>
+ <attribute name="score-attribute-mangle"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <attribute name="with-rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="node-attribute"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="with-rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-order">
+ <element name="rsc_order">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <optional>
+ <attribute name="symmetrical"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="kind">
+ <ref name="order-types"/>
+ </attribute>
+ </choice>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="first"><data type="IDREF"/></attribute>
+ <attribute name="then"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="first-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="then-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="attribute-actions">
+ <choice>
+ <value>start</value>
+ <value>promote</value>
+ <value>demote</value>
+ <value>stop</value>
+ </choice>
+ </define>
+
+ <define name="attribute-roles">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Master</value>
+ <value>Slave</value>
+ </choice>
+ </define>
+
+ <define name="order-types">
+ <choice>
+ <value>Optional</value>
+ <value>Mandatory</value>
+ <value>Serialize</value>
+ </choice>
+ </define>
+
+ <define name="element-lifetime">
+ <element name="lifetime">
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </element>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/constraints-1.1.rng b/test/unittests/schemas/constraints-1.1.rng
new file mode 100644
index 0000000..fff0fb7
--- /dev/null
+++ b/test/unittests/schemas/constraints-1.1.rng
@@ -0,0 +1,246 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-constraints"/>
+ </start>
+
+ <define name="element-constraints">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-location"/>
+ <ref name="element-colocation"/>
+ <ref name="element-order"/>
+ <ref name="element-rsc_ticket"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-location">
+ <element name="rsc_location">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <choice>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <attribute name="rsc-pattern"><text/></attribute>
+ </choice>
+ <optional>
+ <attribute name="role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ </group>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ </choice>
+ <choice>
+ <group>
+ <choice>
+ <attribute name="domain"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="node"><text/></attribute>
+ <externalRef href="score.rng"/>
+ </group>
+ </choice>
+ </group>
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </choice>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ </element>
+ </define>
+
+ <define name="element-resource-set">
+ <element name="resource_set">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="sequential"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="require-all"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <oneOrMore>
+ <element name="resource_ref">
+ <attribute name="id"><data type="IDREF"/></attribute>
+ </element>
+ </oneOrMore>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-colocation">
+ <element name="rsc_colocation">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="score-attribute"><text/></attribute>
+ <attribute name="score-attribute-mangle"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <attribute name="with-rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="node-attribute"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="with-rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="rsc-instance"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="with-rsc-instance"><data type="integer"/></attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-order">
+ <element name="rsc_order">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <optional>
+ <attribute name="symmetrical"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="kind">
+ <ref name="order-types"/>
+ </attribute>
+ </choice>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="first"><data type="IDREF"/></attribute>
+ <attribute name="then"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="first-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="then-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="first-instance"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="then-instance"><data type="integer"/></attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-rsc_ticket">
+ <element name="rsc_ticket">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ <attribute name="ticket"><text/></attribute>
+ <optional>
+ <attribute name="loss-policy">
+ <choice>
+ <value>stop</value>
+ <value>demote</value>
+ <value>fence</value>
+ <value>freeze</value>
+ </choice>
+ </attribute>
+ </optional>
+ </element>
+ </define>
+
+ <define name="attribute-actions">
+ <choice>
+ <value>start</value>
+ <value>promote</value>
+ <value>demote</value>
+ <value>stop</value>
+ </choice>
+ </define>
+
+ <define name="attribute-roles">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Master</value>
+ <value>Slave</value>
+ </choice>
+ </define>
+
+ <define name="order-types">
+ <choice>
+ <value>Optional</value>
+ <value>Mandatory</value>
+ <value>Serialize</value>
+ </choice>
+ </define>
+
+ <define name="element-lifetime">
+ <element name="lifetime">
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </element>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/constraints-1.2.rng b/test/unittests/schemas/constraints-1.2.rng
new file mode 100644
index 0000000..221140c
--- /dev/null
+++ b/test/unittests/schemas/constraints-1.2.rng
@@ -0,0 +1,219 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-constraints"/>
+ </start>
+
+ <define name="element-constraints">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-location"/>
+ <ref name="element-colocation"/>
+ <ref name="element-order"/>
+ <ref name="element-rsc_ticket"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-location">
+ <element name="rsc_location">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <choice>
+ <group>
+ <externalRef href="score.rng"/>
+ <attribute name="node"><text/></attribute>
+ </group>
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </choice>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ </element>
+ </define>
+
+ <define name="element-resource-set">
+ <element name="resource_set">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="sequential"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="require-all"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <oneOrMore>
+ <element name="resource_ref">
+ <attribute name="id"><data type="IDREF"/></attribute>
+ </element>
+ </oneOrMore>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-colocation">
+ <element name="rsc_colocation">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="score-attribute"><text/></attribute>
+ <attribute name="score-attribute-mangle"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <attribute name="with-rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="node-attribute"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="with-rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-order">
+ <element name="rsc_order">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <ref name="element-lifetime"/>
+ </optional>
+ <optional>
+ <attribute name="symmetrical"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="kind">
+ <ref name="order-types"/>
+ </attribute>
+ </choice>
+ </optional>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="first"><data type="IDREF"/></attribute>
+ <attribute name="then"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="first-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="then-action">
+ <ref name="attribute-actions"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="element-rsc_ticket">
+ <element name="rsc_ticket">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <oneOrMore>
+ <ref name="element-resource-set"/>
+ </oneOrMore>
+ <group>
+ <attribute name="rsc"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="rsc-role">
+ <ref name="attribute-roles"/>
+ </attribute>
+ </optional>
+ </group>
+ </choice>
+ <attribute name="ticket"><text/></attribute>
+ <optional>
+ <attribute name="loss-policy">
+ <choice>
+ <value>stop</value>
+ <value>demote</value>
+ <value>fence</value>
+ <value>freeze</value>
+ </choice>
+ </attribute>
+ </optional>
+ </element>
+ </define>
+
+ <define name="attribute-actions">
+ <choice>
+ <value>start</value>
+ <value>promote</value>
+ <value>demote</value>
+ <value>stop</value>
+ </choice>
+ </define>
+
+ <define name="attribute-roles">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Master</value>
+ <value>Slave</value>
+ </choice>
+ </define>
+
+ <define name="order-types">
+ <choice>
+ <value>Optional</value>
+ <value>Mandatory</value>
+ <value>Serialize</value>
+ </choice>
+ </define>
+
+ <define name="element-lifetime">
+ <element name="lifetime">
+ <oneOrMore>
+ <externalRef href="rule.rng"/>
+ </oneOrMore>
+ </element>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/fencing.rng b/test/unittests/schemas/fencing.rng
new file mode 100644
index 0000000..87de5a8
--- /dev/null
+++ b/test/unittests/schemas/fencing.rng
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-stonith"/>
+ </start>
+
+ <define name="element-stonith">
+ <element name="fencing-topology">
+ <zeroOrMore>
+ <ref name="element-level"/>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-level">
+ <element name="fencing-level">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="target"><text/></attribute>
+ <attribute name="index"><data type="positiveInteger"/></attribute>
+ <attribute name="devices">
+ <data type="string">
+ <param name="pattern">([a-zA-Z0-9_\.\-]+)(,[a-zA-Z0-9_\.\-]+)*</param>
+ </data>
+ </attribute>
+ </element>
+ </define>
+
+</grammar>
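
The fencing schema is one fixture here whose start pattern is an actual document root (fencing-topology), so it can be exercised standalone. A minimal sketch of such a check, assuming lxml is available and the script runs from test/unittests/schemas/ (the device names and IDs are made up):

    # Validate a sample fencing topology against fencing.rng (illustrative).
    from lxml import etree

    schema = etree.RelaxNG(etree.parse("fencing.rng"))
    doc = etree.fromstring(
        '<fencing-topology>'
        '<fencing-level id="fl-1" target="node1" index="1" devices="st-ipmi"/>'
        '<fencing-level id="fl-2" target="node1" index="2" devices="st-sbd,st-psu"/>'
        '</fencing-topology>'
    )
    schema.assertValid(doc)  # raises DocumentInvalid on failure
    print("fencing-topology sample is valid")

The devices attribute must match the comma-separated pattern above, so a stray space ("st-sbd, st-psu") would be rejected.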
diff --git a/test/unittests/schemas/nvset.rng b/test/unittests/schemas/nvset.rng
new file mode 100644
index 0000000..0d7e72c
--- /dev/null
+++ b/test/unittests/schemas/nvset.rng
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- types: http://www.w3.org/TR/xmlschema-2/#dateTime -->
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-nvset"/>
+ </start>
+
+ <define name="element-nvset">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <interleave>
+ <optional>
+ <externalRef href="rule.rng"/>
+ </optional>
+ <zeroOrMore>
+ <element name="nvpair">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="name"><text/></attribute>
+ <optional>
+ <attribute name="value"><text/></attribute>
+ </optional>
+ </element>
+ </zeroOrMore>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ </interleave>
+ </group>
+ </choice>
+ </define>
+
+</grammar>
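
Unlike fencing.rng, nvset.rng describes the content model of a name/value set (attributes plus nvpair children) rather than a root element, so it is only consumed through externalRef from the enclosing schemas. For orientation, the shape it accepts looks like the following hypothetical cluster_property_set, parsed here just to show the structure:

    # Illustrative nvset content as embedded by cluster_property_set (names invented).
    from lxml import etree

    fragment = etree.fromstring(
        '<cluster_property_set id="cib-bootstrap-options">'
        '<nvpair id="opt-stonith" name="stonith-enabled" value="true"/>'
        '<nvpair id="opt-quorum" name="no-quorum-policy" value="stop"/>'
        '</cluster_property_set>'
    )
    for nv in fragment.findall("nvpair"):
        print(nv.get("name"), "=", nv.get("value"))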
diff --git a/test/unittests/schemas/pacemaker-1.0.rng b/test/unittests/schemas/pacemaker-1.0.rng
new file mode 100644
index 0000000..7100393
--- /dev/null
+++ b/test/unittests/schemas/pacemaker-1.0.rng
@@ -0,0 +1,121 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- types: http://www.w3.org/TR/xmlschema-2/#dateTime -->
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <element name="cib">
+ <ref name="element-cib"/>
+ </element>
+ </start>
+
+ <define name="element-cib">
+ <ref name="attribute-options"/>
+ <element name="configuration">
+ <interleave>
+ <element name="crm_config">
+ <zeroOrMore>
+ <element name="cluster_property_set">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ <optional>
+ <element name="rsc_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <optional>
+ <element name="op_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <ref name="element-nodes"/>
+ <element name="resources">
+ <externalRef href="resources-1.0.rng"/>
+ </element>
+ <element name="constraints">
+ <externalRef href="constraints-1.0.rng"/>
+ </element>
+ </interleave>
+ </element>
+ <element name="status">
+ <ref name="element-status"/>
+ </element>
+ </define>
+
+ <define name="attribute-options">
+ <externalRef href="versions.rng"/>
+ <optional>
+ <attribute name="crm_feature_set"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-tls-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-clear-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="have-quorum"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="dc-uuid"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="cib-last-written"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="no-quorum-panic"><data type="boolean"/></attribute>
+ </optional>
+ </define>
+
+ <define name="element-nodes">
+ <element name="nodes">
+ <zeroOrMore>
+ <element name="node">
+ <attribute name="id"><text/></attribute>
+ <attribute name="uname"><text/></attribute>
+ <attribute name="type">
+ <choice>
+ <value>normal</value>
+ <value>member</value>
+ <value>ping</value>
+ </choice>
+ </attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-status">
+ <zeroOrMore>
+ <choice>
+ <attribute>
+ <anyName/>
+ <text/>
+ </attribute>
+ <element>
+ <anyName/>
+ <ref name="element-status"/>
+ </element>
+ <text/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/pacemaker-1.1.rng b/test/unittests/schemas/pacemaker-1.1.rng
new file mode 100644
index 0000000..50e9458
--- /dev/null
+++ b/test/unittests/schemas/pacemaker-1.1.rng
@@ -0,0 +1,161 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- types: http://www.w3.org/TR/xmlschema-2/#dateTime -->
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <element name="cib">
+ <ref name="element-cib"/>
+ </element>
+ </start>
+
+ <define name="element-cib">
+ <ref name="attribute-options"/>
+ <element name="configuration">
+ <interleave>
+ <element name="crm_config">
+ <zeroOrMore>
+ <element name="cluster_property_set">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ <optional>
+ <element name="rsc_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <optional>
+ <element name="op_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <ref name="element-nodes"/>
+ <element name="resources">
+ <externalRef href="resources-1.1.rng"/>
+ </element>
+ <optional>
+ <element name="domains">
+ <zeroOrMore>
+ <element name="domain">
+ <attribute name="id"><data type="ID"/></attribute>
+ <zeroOrMore>
+ <element name="node">
+ <attribute name="name"><text/></attribute>
+ <externalRef href="score.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <element name="constraints">
+ <externalRef href="constraints-1.1.rng"/>
+ </element>
+ <optional>
+ <externalRef href="acls-1.1.rng"/>
+ </optional>
+ <optional>
+ <externalRef href="fencing.rng"/>
+ </optional>
+ </interleave>
+ </element>
+ <element name="status">
+ <ref name="element-status"/>
+ </element>
+ </define>
+
+ <define name="attribute-options">
+ <externalRef href="versions.rng"/>
+ <optional>
+ <attribute name="crm_feature_set"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-tls-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-clear-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="have-quorum"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="dc-uuid"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="cib-last-written"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="no-quorum-panic"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-origin"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-client"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-user"><text/></attribute>
+ </optional>
+ </define>
+
+ <define name="element-nodes">
+ <element name="nodes">
+ <zeroOrMore>
+ <element name="node">
+ <attribute name="id"><text/></attribute>
+ <attribute name="uname"><text/></attribute>
+ <optional>
+ <attribute name="type">
+ <choice>
+ <value>normal</value>
+ <value>member</value>
+ <value>ping</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <zeroOrMore>
+ <choice>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-status">
+ <zeroOrMore>
+ <choice>
+ <attribute>
+ <anyName/>
+ <text/>
+ </attribute>
+ <element>
+ <anyName/>
+ <ref name="element-status"/>
+ </element>
+ <text/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/pacemaker-1.2.rng b/test/unittests/schemas/pacemaker-1.2.rng
new file mode 100644
index 0000000..33a7d2d
--- /dev/null
+++ b/test/unittests/schemas/pacemaker-1.2.rng
@@ -0,0 +1,146 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- types: http://www.w3.org/TR/xmlschema-2/#dateTime -->
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <element name="cib">
+ <ref name="element-cib"/>
+ </element>
+ </start>
+
+ <define name="element-cib">
+ <ref name="attribute-options"/>
+ <element name="configuration">
+ <interleave>
+ <element name="crm_config">
+ <zeroOrMore>
+ <element name="cluster_property_set">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ <optional>
+ <element name="rsc_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <optional>
+ <element name="op_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <ref name="element-nodes"/>
+ <element name="resources">
+ <externalRef href="resources-1.2.rng"/>
+ </element>
+ <element name="constraints">
+ <externalRef href="constraints-1.2.rng"/>
+ </element>
+ <optional>
+ <externalRef href="acls-1.2.rng"/>
+ </optional>
+ <optional>
+ <externalRef href="fencing.rng"/>
+ </optional>
+ </interleave>
+ </element>
+ <element name="status">
+ <ref name="element-status"/>
+ </element>
+ </define>
+
+ <define name="attribute-options">
+ <externalRef href="versions.rng"/>
+ <optional>
+ <attribute name="crm_feature_set"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-tls-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="remote-clear-port"><data type="nonNegativeInteger"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="have-quorum"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="dc-uuid"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="cib-last-written"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="no-quorum-panic"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-origin"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-client"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="update-user"><text/></attribute>
+ </optional>
+ </define>
+
+ <define name="element-nodes">
+ <element name="nodes">
+ <zeroOrMore>
+ <element name="node">
+ <attribute name="id"><text/></attribute>
+ <attribute name="uname"><text/></attribute>
+ <optional>
+ <attribute name="type">
+ <choice>
+ <value>normal</value>
+ <value>member</value>
+ <value>ping</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <zeroOrMore>
+ <choice>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <define name="element-status">
+ <zeroOrMore>
+ <choice>
+ <attribute>
+ <anyName/>
+ <text/>
+ </attribute>
+ <element>
+ <anyName/>
+ <ref name="element-status"/>
+ </element>
+ <text/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+</grammar>
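
Because pacemaker-1.2.rng stitches the other fixtures together via externalRef, whole-CIB validation only works with all of the referenced .rng files sitting in the same directory. A sketch of the kind of check the unit tests build on (the empty CIB below is illustrative):

    # Validate a minimal empty CIB against the 1.2 test schema (illustrative).
    from lxml import etree

    schema = etree.RelaxNG(etree.parse("pacemaker-1.2.rng"))
    cib = etree.fromstring(
        '<cib validate-with="pacemaker-1.2" admin_epoch="0" epoch="0" num_updates="0">'
        '<configuration>'
        '<crm_config/><nodes/><resources/><constraints/>'
        '</configuration>'
        '<status/>'
        '</cib>'
    )
    print("valid:", schema.validate(cib))  # details end up in schema.error_log

Note that versions.rng makes validate-with, admin_epoch, epoch and num_updates all mandatory, so dropping any of them fails validation.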
diff --git a/test/unittests/schemas/resources-1.0.rng b/test/unittests/schemas/resources-1.0.rng
new file mode 100644
index 0000000..7ea2228
--- /dev/null
+++ b/test/unittests/schemas/resources-1.0.rng
@@ -0,0 +1,177 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-resources"/>
+ </start>
+
+ <define name="element-resources">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ <ref name="element-clone"/>
+ <ref name="element-master"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-primitive">
+ <element name="primitive">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ </choice>
+ </attribute>
+ </choice>
+ <attribute name="type"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <ref name="element-operations"/>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-group">
+ <element name="group">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <oneOrMore>
+ <ref name="element-primitive"/>
+ </oneOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-clone">
+ <element name="clone">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-master">
+ <element name="master">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-resource-extra">
+ <zeroOrMore>
+ <choice>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-operations">
+ <optional>
+ <element name="operations">
+ <optional>
+ <attribute name="id"><data type="ID"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="op">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="name"><text/></attribute>
+ <attribute name="interval"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="start-delay"><text/></attribute>
+ <attribute name="interval-origin"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="timeout"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="enabled"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="record-pending"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Slave</value>
+ <value>Master</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="requires">
+ <choice>
+ <value>nothing</value>
+ <value>quorum</value>
+ <value>fencing</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="on-fail">
+ <choice>
+ <value>ignore</value>
+ <value>block</value>
+ <value>stop</value>
+ <value>restart</value>
+ <value>standby</value>
+ <value>fence</value>
+ </choice>
+ </attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/resources-1.1.rng b/test/unittests/schemas/resources-1.1.rng
new file mode 100644
index 0000000..81a8f82
--- /dev/null
+++ b/test/unittests/schemas/resources-1.1.rng
@@ -0,0 +1,225 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-resources"/>
+ </start>
+
+ <define name="element-resources">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-template"/>
+ <ref name="element-group"/>
+ <ref name="element-clone"/>
+ <ref name="element-master"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-primitive">
+ <element name="primitive">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ <value>service</value>
+ <value>systemd</value>
+ <value>nagios</value>
+ </choice>
+ </attribute>
+ </choice>
+ <attribute name="type"><text/></attribute>
+ </group>
+ <attribute name="template"><data type="IDREF"/></attribute>
+ </choice>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <ref name="element-operations"/>
+ <zeroOrMore>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-template">
+ <element name="template">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ </choice>
+ </attribute>
+ </choice>
+ <attribute name="type"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <ref name="element-operations"/>
+ <zeroOrMore>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-group">
+ <element name="group">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <oneOrMore>
+ <ref name="element-primitive"/>
+ </oneOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-clone">
+ <element name="clone">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-master">
+ <element name="master">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-resource-extra">
+ <zeroOrMore>
+ <choice>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-operations">
+ <optional>
+ <element name="operations">
+ <optional>
+ <attribute name="id"><data type="ID"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="op">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="name"><text/></attribute>
+ <attribute name="interval"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="start-delay"><text/></attribute>
+ <attribute name="interval-origin"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="timeout"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="enabled"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="record-pending"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Slave</value>
+ <value>Master</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="requires">
+ <choice>
+ <value>nothing</value>
+ <value>quorum</value>
+ <value>fencing</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="on-fail">
+ <choice>
+ <value>ignore</value>
+ <value>block</value>
+ <value>stop</value>
+ <value>restart</value>
+ <value>standby</value>
+ <value>fence</value>
+ <value>restart-container</value>
+ </choice>
+ </attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ </define>
+
+</grammar>
diff --git a/test/unittests/schemas/resources-1.2.rng b/test/unittests/schemas/resources-1.2.rng
new file mode 100644
index 0000000..81a8f82
--- /dev/null
+++ b/test/unittests/schemas/resources-1.2.rng
@@ -0,0 +1,225 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-resources"/>
+ </start>
+
+ <define name="element-resources">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-template"/>
+ <ref name="element-group"/>
+ <ref name="element-clone"/>
+ <ref name="element-master"/>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-primitive">
+ <element name="primitive">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ <value>service</value>
+ <value>systemd</value>
+ <value>nagios</value>
+ </choice>
+ </attribute>
+ </choice>
+ <attribute name="type"><text/></attribute>
+ </group>
+ <attribute name="template"><data type="IDREF"/></attribute>
+ </choice>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <ref name="element-operations"/>
+ <zeroOrMore>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-template">
+ <element name="template">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ </choice>
+ </attribute>
+ </choice>
+ <attribute name="type"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <ref name="element-operations"/>
+ <zeroOrMore>
+ <element name="utilization">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </zeroOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-group">
+ <element name="group">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <oneOrMore>
+ <ref name="element-primitive"/>
+ </oneOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-clone">
+ <element name="clone">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-master">
+ <element name="master">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-resource-extra">
+ <zeroOrMore>
+ <choice>
+ <element name="meta_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ <element name="instance_attributes">
+ <externalRef href="nvset.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-operations">
+ <optional>
+ <element name="operations">
+ <optional>
+ <attribute name="id"><data type="ID"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="op">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="name"><text/></attribute>
+ <attribute name="interval"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="start-delay"><text/></attribute>
+ <attribute name="interval-origin"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="timeout"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="enabled"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="record-pending"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Slave</value>
+ <value>Master</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="requires">
+ <choice>
+ <value>nothing</value>
+ <value>quorum</value>
+ <value>fencing</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="on-fail">
+ <choice>
+ <value>ignore</value>
+ <value>block</value>
+ <value>stop</value>
+ <value>restart</value>
+ <value>standby</value>
+ <value>fence</value>
+ <value>restart-container</value>
+ </choice>
+ </attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ </define>
+
+</grammar>
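
The resources schemas also start at a concrete pattern (any of primitive, template, group, clone or master), so a single resource definition can be checked in isolation, provided nvset.rng and score.rng are alongside for the externalRefs. A sketch, with a made-up Dummy primitive:

    # Validate a bare primitive against resources-1.2.rng (illustrative).
    from lxml import etree

    schema = etree.RelaxNG(etree.parse("resources-1.2.rng"))
    prim = etree.fromstring(
        '<primitive id="d1" class="ocf" provider="heartbeat" type="Dummy"/>'
    )
    schema.assertValid(prim)

Per the choice at the top of element-primitive, class="ocf" makes the provider attribute mandatory, while the other classes do not declare it and so reject it.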
diff --git a/test/unittests/schemas/rule.rng b/test/unittests/schemas/rule.rng
new file mode 100644
index 0000000..242eff8
--- /dev/null
+++ b/test/unittests/schemas/rule.rng
@@ -0,0 +1,137 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ xmlns:ann="http://relaxng.org/ns/compatibility/annotations/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-rule"/>
+ </start>
+
+ <define name="element-rule">
+ <element name="rule">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="score-attribute"><text/></attribute>
+ </choice>
+ <optional>
+ <attribute name="boolean-op">
+ <choice>
+ <value>or</value>
+ <value>and</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="role"><text/></attribute>
+ </optional>
+ <oneOrMore>
+ <choice>
+ <element name="expression">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="attribute"><text/></attribute>
+ <attribute name="operation">
+ <choice>
+ <value>lt</value>
+ <value>gt</value>
+ <value>lte</value>
+ <value>gte</value>
+ <value>eq</value>
+ <value>ne</value>
+ <value>defined</value>
+ <value>not_defined</value>
+ </choice>
+ </attribute>
+ <optional>
+ <attribute name="value"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="type" ann:defaultValue="string">
+ <choice>
+ <value>string</value>
+ <value>number</value>
+ <value>version</value>
+ </choice>
+ </attribute>
+ </optional>
+ </element>
+ <element name="date_expression">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <attribute name="operation"><value>in_range</value></attribute>
+ <choice>
+ <group>
+ <optional>
+ <attribute name="start"><text/></attribute>
+ </optional>
+ <attribute name="end"><text/></attribute>
+ </group>
+ <group>
+ <attribute name="start"><text/></attribute>
+ <element name="duration">
+ <ref name="date-common"/>
+ </element>
+ </group>
+ </choice>
+ </group>
+ <group>
+ <attribute name="operation"><value>gt</value></attribute>
+ <attribute name="start"><text/></attribute>
+ </group>
+ <group>
+ <attribute name="operation"><value>lt</value></attribute>
+ <choice>
+ <attribute name="end"><text/></attribute>
+ </choice>
+ </group>
+ <group>
+ <attribute name="operation"><value>date_spec</value></attribute>
+ <element name="date_spec">
+ <ref name="date-common"/>
+ </element>
+ </group>
+ </choice>
+ </element>
+ <ref name="element-rule"/>
+ </choice>
+ </oneOrMore>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="date-common">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="hours"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="monthdays"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="weekdays"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="yearsdays"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="months"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="weeks"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="years"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="weekyears"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="moon"><text/></attribute>
+ </optional>
+ </define>
+
+</grammar>
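
rule.rng likewise starts at the rule element itself, which is what makes the recursive nesting (rules inside rules) testable directly. A sketch with arbitrary example values:

    # Validate a location-style rule against rule.rng (illustrative).
    from lxml import etree

    schema = etree.RelaxNG(etree.parse("rule.rng"))
    rule = etree.fromstring(
        '<rule id="r1" score="INFINITY">'
        '<expression id="r1-e1" attribute="#uname" operation="eq" value="node1"/>'
        '</rule>'
    )
    schema.assertValid(rule)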
diff --git a/test/unittests/schemas/score.rng b/test/unittests/schemas/score.rng
new file mode 100644
index 0000000..57b10f2
--- /dev/null
+++ b/test/unittests/schemas/score.rng
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="attribute-score"/>
+ </start>
+
+ <define name="attribute-score">
+ <attribute name="score">
+ <choice>
+ <data type="integer"/>
+ <value>INFINITY</value>
+ <value>+INFINITY</value>
+ <value>-INFINITY</value>
+ </choice>
+ </attribute>
+ </define>
+</grammar>
diff --git a/test/unittests/schemas/versions.rng b/test/unittests/schemas/versions.rng
new file mode 100644
index 0000000..ab4e4ea
--- /dev/null
+++ b/test/unittests/schemas/versions.rng
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="attribute-version"/>
+ </start>
+
+ <define name="attribute-version">
+ <attribute name="validate-with">
+ <choice>
+ <value>none</value>
+ <value>pacemaker-0.6</value>
+ <value>transitional-0.6</value>
+ <value>pacemaker-0.7</value>
+ <value>pacemaker-1.0</value>
+ <value>pacemaker-1.1</value>
+ <value>pacemaker-1.2</value>
+ </choice>
+ </attribute>
+ <attribute name="admin_epoch"><data type="nonNegativeInteger"/></attribute>
+ <attribute name="epoch"><data type="nonNegativeInteger"/></attribute>
+ <attribute name="num_updates"><data type="nonNegativeInteger"/></attribute>
+ </define>
+</grammar>
diff --git a/test/unittests/scripts/inc1/main.yml b/test/unittests/scripts/inc1/main.yml
new file mode 100644
index 0000000..8c290d3
--- /dev/null
+++ b/test/unittests/scripts/inc1/main.yml
@@ -0,0 +1,22 @@
+version: 2.2
+shortdesc: Include test script 1
+longdesc: Test if includes work ok
+parameters:
+ - name: foo
+ type: boolean
+ shortdesc: An optional feature
+ - name: bar
+ type: string
+ shortdesc: A string of characters
+ value: the name is the game
+ - name: is-required
+ type: int
+ required: true
+actions:
+ - call: ls /tmp
+ when: foo
+ shortdesc: ls
+ - call: "echo '{{foo}}'"
+ shortdesc: foo
+ - call: "echo '{{bar}}'"
+ shortdesc: bar
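
The script fixtures from here on are plain YAML, so their structure can be inspected the same way crmsh's script loader first reads them. A minimal sketch using PyYAML (path relative to the repository root):

    # Peek at the inc1 test script as a plain mapping (illustrative).
    import yaml

    with open("test/unittests/scripts/inc1/main.yml") as f:
        script = yaml.safe_load(f)

    print(script["shortdesc"])                        # Include test script 1
    print([p["name"] for p in script["parameters"]])  # ['foo', 'bar', 'is-required']
    print(len(script["actions"]))                     # 3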
diff --git a/test/unittests/scripts/inc2/main.yml b/test/unittests/scripts/inc2/main.yml
new file mode 100644
index 0000000..4910696
--- /dev/null
+++ b/test/unittests/scripts/inc2/main.yml
@@ -0,0 +1,26 @@
+---
+- version: 2.2
+ shortdesc: Includes another script
+ longdesc: This one includes another script
+ parameters:
+ - name: wiz
+ type: string
+ - name: foo
+ type: boolean
+ shortdesc: A different foo
+ include:
+ - script: inc1
+ name: included-script
+ parameters:
+ - name: is-required
+ value: 33
+ actions:
+ - call: "echo 'before {{wiz}}'"
+ shortdesc: before wiz
+ - include: included-script
+ - call: "echo 'after {{foo}}'"
+ shortdesc: after foo
+ - cib: |
+ {{included-script:is-required}}
+ - cib: |
+ {{wiz}}
diff --git a/test/unittests/scripts/legacy/main.yml b/test/unittests/scripts/legacy/main.yml
new file mode 100644
index 0000000..ef5d35b
--- /dev/null
+++ b/test/unittests/scripts/legacy/main.yml
@@ -0,0 +1,52 @@
+---
+- name: Initialize a new cluster
+ description: >
+ Initializes a new cluster on the nodes provided. Will try to
+ configure SSH if not already configured, and install missing
+ packages.
+
+ A more user-friendly interface to this script is provided by the
+ cluster init command.
+ parameters:
+ - name: iface
+ description: "Use the given interface. Try to auto-detect interface by default."
+ default: ""
+
+ - name: transport
+ description: "Corosync transport (mcast or udpu)"
+ default: "udpu"
+
+ - name: bindnetaddr
+ description: "Network address to bind to (e.g.: 192.168.1.0)"
+ default: ""
+
+ - name: mcastaddr
+ description: "Multicast address (e.g.: 239.x.x.x)"
+ default: ""
+
+ - name: mcastport
+ description: "Multicast port"
+ default: 5405
+
+ steps:
+ - name: Configure SSH
+ apply_local: configure.py ssh
+
+ - name: Check state of nodes
+ collect: collect.py
+
+ - name: Verify parameters
+ validate: verify.py
+
+ - name: Install packages
+ apply: configure.py install
+
+ - name: Generate corosync authkey
+ apply_local: authkey.py
+
+ - name: Configure cluster nodes
+ apply: configure.py corosync
+
+ - name: Initialize cluster
+ apply_local: init.py
+
diff --git a/test/unittests/scripts/templates/apache.xml b/test/unittests/scripts/templates/apache.xml
new file mode 100644
index 0000000..faf3ef0
--- /dev/null
+++ b/test/unittests/scripts/templates/apache.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0"?>
+<template name="apache">
+
+<shortdesc lang="en">Apache Web Server</shortdesc>
+<longdesc lang="en">
+Create a single primitive resource of type apache.
+</longdesc>
+
+<parameters>
+
+<parameter name="id" required="1">
+<shortdesc lang="en">Resource ID</shortdesc>
+<longdesc lang="en">
+Unique ID for this Apache resource in the cluster.
+</longdesc>
+<content type="string" default="apache"/>
+</parameter>
+
+<parameter name="configfile" required="1">
+<shortdesc lang="en">Apache config file</shortdesc>
+<longdesc lang="en">
+Full pathname of the Apache configuration file</longdesc>
+<content type="string" default="/etc/apache2/httpd.conf"/>
+</parameter>
+
+</parameters>
+
+<crm_script>
+primitive <insert param="id"/> ocf:heartbeat:apache
+ params
+ configfile="<insert param="configfile"/>"
+ op start timeout="40" op stop timeout="60"
+ op monitor interval="10" timeout="20"
+</crm_script>
+
+</template>
diff --git a/test/unittests/scripts/templates/virtual-ip.xml b/test/unittests/scripts/templates/virtual-ip.xml
new file mode 100644
index 0000000..22ab5bf
--- /dev/null
+++ b/test/unittests/scripts/templates/virtual-ip.xml
@@ -0,0 +1,62 @@
+<?xml version="1.0"?>
+<template name="virtual-ip">
+
+<shortdesc lang="en">Virtual IP Address</shortdesc>
+<longdesc lang="en">
+Create a single primitive resource of type IPaddr2.
+</longdesc>
+
+<parameters>
+
+<parameter name="id" required="1">
+<shortdesc lang="en">Resource ID</shortdesc>
+<longdesc lang="en">
+Unique ID for this virtual IP address resource in the cluster.
+</longdesc>
+<content type="string" default="virtual-ip"/>
+</parameter>
+
+<parameter name="ip" required="1">
+<shortdesc lang="en">IP address</shortdesc>
+<longdesc lang="en">
+The IPv4 address to be configured in dotted quad notation,
+for example "192.168.1.1".
+</longdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="netmask">
+<shortdesc lang="en">Netmask</shortdesc>
+<longdesc lang="en">
+The netmask for the interface in CIDR format
+(e.g., 24 and not 255.255.255.0).
+
+If unspecified, it will be determined automatically.
+</longdesc>
+<content type="string"/>
+</parameter>
+
+<parameter name="lvs_support">
+<shortdesc lang="en">LVS support</shortdesc>
+<longdesc lang="en">
+Enable support for LVS Direct Routing configurations. When the IP
+address is stopped, it is only moved to the loopback device, so the
+local node can continue to service requests while no longer
+advertising the address on the network.
+</longdesc>
+<content type="boolean"/>
+</parameter>
+
+</parameters>
+
+<crm_script>
+primitive <insert param="id"/> ocf:heartbeat:IPaddr2
+ params
+ ip="<insert param="ip"/>"
+ <if set="netmask">cidr_netmask="<insert param="netmask"/>"</if>
+ <if set="lvs_support">lvs_support="<insert param="lvs_support"/>"</if>
+ op start timeout="20" op stop timeout="20"
+ op monitor interval="10" timeout="20"
+</crm_script>
+
+</template>
diff --git a/test/unittests/scripts/unified/main.yml b/test/unittests/scripts/unified/main.yml
new file mode 100644
index 0000000..29f5d07
--- /dev/null
+++ b/test/unittests/scripts/unified/main.yml
@@ -0,0 +1,26 @@
+version: 2.2
+shortdesc: Unified Script
+longdesc: >
+ Test if we can define multiple steps in a single script
+category: test
+steps:
+ - parameters:
+ - name: id
+ type: resource
+ required: true
+ shortdesc: Identifier
+ - name: vip
+ shortdesc: Configure the virtual IP
+ parameters:
+ - name: id
+ type: resource
+ required: true
+ shortdesc: IP Identifier
+ - name: ip
+ type: ip_address
+ required: true
+ shortdesc: The IP Address
+actions:
+ - cib: |
+ primitive {{vip:id}} IPaddr2 ip={{vip:ip}}
+ group g-{{id}} {{id}} {{vip:id}}
diff --git a/test/unittests/scripts/v2/main.yml b/test/unittests/scripts/v2/main.yml
new file mode 100644
index 0000000..41822a2
--- /dev/null
+++ b/test/unittests/scripts/v2/main.yml
@@ -0,0 +1,46 @@
+---
+- version: 2.2
+ shortdesc: Apache Webserver
+ longdesc: >
+ Configure a resource group containing a virtual IP address and
+ an instance of the Apache web server.
+ category: Server
+ parameters:
+ - name: id
+ shortdesc: The ID specified here is for the web server resource group.
+ - name: install
+ type: boolean
+ value: true
+ shortdesc: Disable if no installation should be performed
+ include:
+ - agent: test:apache
+ parameters:
+ - name: id
+ value: "{{id}}-server"
+ - name: configfile
+ type: file
+ ops: |
+ op monitor interval=20s timeout=20s
+ - agent: test:virtual-ip
+ name: virtual-ip
+ parameters:
+ - name: id
+ value: "{{id}}-ip"
+ - name: ip
+ type: ip_address
+ ops: |
+ op monitor interval=20s timeout=20s
+ actions:
+ - install:
+ - apache2
+ when: install
+ - call: a2enable mod_status
+ shortdesc: Enable status module
+ nodes: all
+ when: install
+ - cib: |
+ {{virtual-ip}}
+ {{apache}}
+ group {{id}}
+ {{virtual-ip:id}}
+ {{apache:id}}
diff --git a/test/unittests/scripts/vip/main.yml b/test/unittests/scripts/vip/main.yml
new file mode 100644
index 0000000..4f3bde1
--- /dev/null
+++ b/test/unittests/scripts/vip/main.yml
@@ -0,0 +1,28 @@
+---
+- version: 2.2
+ shortdesc: Virtual IP
+ category: Basic
+ include:
+ - agent: test:virtual-ip
+ name: virtual-ip
+ parameters:
+ - name: id
+ type: resource
+ required: true
+ - name: ip
+ type: ip_address
+ required: true
+ - name: cidr_netmask
+ type: integer
+ required: false
+ - name: broadcast
+ type: ipaddress
+ required: false
+ - name: lvs_support
+ required: false
+ type: boolean
+ ops: |
+ op start timeout="20" op stop timeout="20"
+ op monitor interval="10" timeout="20"
+ actions:
+ - include: virtual-ip
diff --git a/test/unittests/scripts/vipinc/main.yml b/test/unittests/scripts/vipinc/main.yml
new file mode 100644
index 0000000..6741885
--- /dev/null
+++ b/test/unittests/scripts/vipinc/main.yml
@@ -0,0 +1,14 @@
+version: 2.2
+category: Test
+shortdesc: Test script include
+include:
+ - script: vip
+ parameters:
+ - name: id
+ value: vip1
+ - name: ip
+ value: 192.168.200.100
+actions:
+ - include: vip
+ - cib: |
+ clone c-{{vip:id}} {{vip:id}}
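
Given the parameters passed to the included vip script, the cib action above should expand to "clone c-vip1 vip1". crmsh's actual handle expansion lives in crmsh/handles.py; the following deliberately naive regex substitution only illustrates the idea of resolving {{scope:name}} references:

    # Naive {{scope:name}} expansion, for illustration only.
    import re

    values = {"vip:id": "vip1", "vip:ip": "192.168.200.100"}
    template = "clone c-{{vip:id}} {{vip:id}}"

    expanded = re.sub(r"\{\{([^}]+)\}\}", lambda m: values[m.group(1)], template)
    print(expanded)  # clone c-vip1 vip1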
diff --git a/test/unittests/scripts/workflows/10-webserver.xml b/test/unittests/scripts/workflows/10-webserver.xml
new file mode 100644
index 0000000..f18d55a
--- /dev/null
+++ b/test/unittests/scripts/workflows/10-webserver.xml
@@ -0,0 +1,50 @@
+<?xml version="1.0"?>
+<workflow name="10-webserver">
+
+<shortdesc lang="en">Web Server</shortdesc>
+<longdesc lang="en">
+Configure a resource group containing a virtual IP address and
+an instance of the Apache web server. You may wish to use this
+in conjunction with a filesystem resource; in this case you will
+need to configure the filesystem separately, then add colocation
+and ordering constraints to have it start before the resource
+group you create here.
+</longdesc>
+
+<parameters>
+<stepdesc lang="en">
+The ID specified here is for the web server resource group.
+</stepdesc>
+<parameter name="id" required="1">
+<shortdesc lang="en">Group ID</shortdesc>
+<longdesc lang="en">
+Unique ID for the web server resource group in the cluster.
+</longdesc>
+<content type="string" default="web-server"/>
+</parameter>
+</parameters>
+
+<templates>
+<template name="virtual-ip" required="1">
+<stepdesc lang="en">
+The IP address configured here will start before the Apache instance.
+</stepdesc>
+</template>
+<template name="apache" required="1">
+<stepdesc lang="en">
+The Apache configuration file specified here must be available via the
+same path on all cluster nodes, and Apache must be configured with
+mod_status enabled. If in doubt, try running Apache manually via
+its init script first, and ensure http://localhost:80/server-status is
+accessible.
+</stepdesc>
+</template>
+</templates>
+
+<crm_script>
+group <insert param="id"/>
+ <insert param="id" from_template="virtual-ip"/>
+ <insert param="id" from_template="apache"/>
+</crm_script>
+
+</workflow>
diff --git a/test/unittests/test.conf b/test/unittests/test.conf
new file mode 100644
index 0000000..fe75686
--- /dev/null
+++ b/test/unittests/test.conf
@@ -0,0 +1,12 @@
+[path]
+sharedir = ../../doc
+cache = ../../doc
+crm_config = .
+crm_daemon_dir = .
+crm_daemon_user = hacluster
+ocf_root = .
+crm_dtd_dir = .
+pe_state_dir = .
+heartbeat_dir = .
+hb_delnode = ./hb_delnode
+nagios_plugins = .
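
test.conf redirects every path crmsh would normally resolve on a live system into the test tree. Since it is INI-style, a quick standard-library read shows what the fixture pins down (path illustrative):

    # Dump the [path] section of the unit-test config (illustrative).
    import configparser

    cfg = configparser.ConfigParser()
    cfg.read("test/unittests/test.conf")
    print(dict(cfg["path"]))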
diff --git a/test/unittests/test_bootstrap.py b/test/unittests/test_bootstrap.py
new file mode 100644
index 0000000..45bf03d
--- /dev/null
+++ b/test/unittests/test_bootstrap.py
@@ -0,0 +1,1905 @@
+"""
+Unit tests for crmsh/bootstrap.py
+
+:author: xinliang
+:organization: SUSE Linux GmbH
+:contact: XLiang@suse.de
+
+:since: 2019-10-21
+"""
+
+# pylint:disable=C0103,C0111,W0212,W0611
+
+import subprocess
+import unittest
+import yaml
+import socket
+
+import crmsh.sh
+import crmsh.ssh_key
+import crmsh.user_of_host
+import crmsh.utils
+from crmsh.ui_node import NodeMgmt
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import bootstrap
+from crmsh import constants
+from crmsh import qdevice
+
+
+class TestContext(unittest.TestCase):
+ """
+ Unit tests for crmsh.bootstrap.Context
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.ctx_inst = bootstrap.Context()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.bootstrap.Context.initialize_user')
+ def test_set_context(self, mock_initialize_user: mock.MagicMock):
+ options = mock.Mock(yes_to_all=True, ipv6=False)
+ ctx = self.ctx_inst.set_context(options)
+ self.assertEqual(ctx.yes_to_all, True)
+ self.assertEqual(ctx.ipv6, False)
+ mock_initialize_user.assert_called_once()
+
+ @mock.patch('crmsh.qdevice.QDevice')
+ def test_initialize_qdevice_return(self, mock_qdevice):
+ self.ctx_inst.initialize_qdevice()
+ mock_qdevice.assert_not_called()
+
+ @mock.patch('crmsh.qdevice.QDevice')
+ def test_initialize_qdevice(self, mock_qdevice):
+ ctx = crmsh.bootstrap.Context()
+ ctx.qnetd_addr = "node3"
+ ctx.qdevice_port = 123
+ ctx.stage = ""
+ ctx.initialize_qdevice()
+ mock_qdevice.assert_called_once_with('node3', port=123, ssh_user=None, algo=None, tie_breaker=None, tls=None, cmds=None, mode=None, is_stage=False)
+
+ @mock.patch('crmsh.qdevice.QDevice')
+ def test_initialize_qdevice_with_user(self, mock_qdevice):
+ ctx = crmsh.bootstrap.Context()
+ ctx.qnetd_addr = "alice@node3"
+ ctx.qdevice_port = 123
+ ctx.stage = ""
+ ctx.initialize_qdevice()
+ mock_qdevice.assert_called_once_with('node3', port=123, ssh_user='alice', algo=None, tie_breaker=None, tls=None, cmds=None, mode=None, is_stage=False)
+
+ @mock.patch('crmsh.utils.fatal')
+ def test_validate_sbd_option_error_together(self, mock_error):
+ mock_error.side_effect = SystemExit
+ ctx = crmsh.bootstrap.Context()
+ ctx.sbd_devices = ["/dev/sda1"]
+ ctx.diskless_sbd = True
+ with self.assertRaises(SystemExit):
+ ctx._validate_sbd_option()
+ mock_error.assert_called_once_with("Can't use -s and -S options together")
+
+ @mock.patch('crmsh.utils.fatal')
+ def test_validate_sbd_option_error_sbd_stage_no_option(self, mock_error):
+ mock_error.side_effect = SystemExit
+ ctx = crmsh.bootstrap.Context()
+ ctx.stage = "sbd"
+ ctx.yes_to_all = True
+ with self.assertRaises(SystemExit):
+ ctx._validate_sbd_option()
+ mock_error.assert_called_once_with("Stage sbd should specify sbd device by -s or diskless sbd by -S option")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_validate_sbd_option_error_sbd_stage_service(self, mock_active, mock_error):
+ mock_error.side_effect = SystemExit
+ ctx = crmsh.bootstrap.Context()
+ ctx.stage = "sbd"
+ ctx.diskless_sbd = True
+ mock_active.return_value = True
+ with self.assertRaises(SystemExit):
+ ctx._validate_sbd_option()
+ mock_error.assert_called_once_with("Can't configure stage sbd: sbd.service already running! Please use crm option '-F' if need to redeploy")
+ mock_active.assert_called_once_with("sbd.service")
+
+ @mock.patch('crmsh.utils.check_all_nodes_reachable')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_validate_sbd_option_error_sbd_stage(self, mock_active, mock_check_all):
+ ctx = crmsh.bootstrap.Context()
+ ctx.stage = "sbd"
+ ctx.diskless_sbd = True
+ ctx.cluster_is_running = True
+ mock_active.return_value = False
+ ctx._validate_sbd_option()
+ mock_active.assert_called_once_with("sbd.service")
+ mock_check_all.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.fatal')
+ def test_validate_option_error_nic_number(self, mock_error):
+ mock_error.side_effect = SystemExit
+ ctx = crmsh.bootstrap.Context()
+ ctx.nic_list = ["eth1", "eth2", "eth3"]
+ with self.assertRaises(SystemExit):
+ ctx.validate_option()
+ mock_error.assert_called_once_with("Maximum number of interface is 2")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('socket.gethostbyname')
+ @mock.patch('crmsh.utils.InterfacesInfo.ip_in_local')
+ def test_validate_cluster_node_same_name(self, mock_ip_in_local, mock_gethost, mock_fatal):
+ ctx = crmsh.bootstrap.Context()
+ ctx.cluster_node = "me"
+ ctx.type = "join"
+ mock_fatal.side_effect = SystemExit
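+ # "me" resolves to a local address, so joining ourselves must be rejected with fatal()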
+ mock_gethost.return_value = ("10.10.10.41", None)
+ mock_ip_in_local.return_value = True
+ with self.assertRaises(SystemExit):
+ ctx._validate_cluster_node()
+ mock_fatal.assert_called_once_with("Please specify peer node's hostname or IP address")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('socket.gethostbyname')
+ def test_validate_cluster_node_unknown_name(self, mock_gethost, mock_fatal):
+ ctx = crmsh.bootstrap.Context()
+ ctx.cluster_node = "xxxx"
+ ctx.type = "join"
+ mock_fatal.side_effect = SystemExit
+ mock_gethost.side_effect = socket.gaierror("gethostbyname error")
+ with self.assertRaises(SystemExit):
+ ctx._validate_cluster_node()
+ mock_fatal.assert_called_once_with('"xxxx": gethostbyname error')
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.bootstrap.Validation.valid_admin_ip')
+ def test_validate_option(self, mock_admin_ip, mock_warn):
+ ctx = crmsh.bootstrap.Context()
+ ctx.admin_ip = "10.10.10.123"
+ ctx.qdevice_inst = mock.Mock()
+ ctx._validate_sbd_option = mock.Mock()
+ ctx._validate_nodes_option = mock.Mock()
+ ctx.validate_option()
+ mock_admin_ip.assert_called_once_with("10.10.10.123")
+ ctx.qdevice_inst.valid_qdevice_options.assert_called_once_with()
+ ctx._validate_sbd_option.assert_called_once_with()
+
+ @mock.patch('logging.Logger.info')
+ def test_load_specific_profile_return(self, mock_status):
+ res = self.ctx_inst.load_specific_profile(None)
+ assert res == {}
+ mock_status.assert_not_called()
+
+ @mock.patch('logging.Logger.info')
+ def test_load_specific_profile_not_exist(self, mock_status):
+ self.ctx_inst.profiles_data = {"name": "test"}
+ res = self.ctx_inst.load_specific_profile("newname")
+ assert res == {}
+ mock_status.assert_called_once_with("\"newname\" profile does not exist in {}".format(bootstrap.PROFILES_FILE))
+
+ @mock.patch('logging.Logger.info')
+ def test_load_specific_profile(self, mock_status):
+ self.ctx_inst.profiles_data = {"name": "test"}
+ res = self.ctx_inst.load_specific_profile("name")
+ assert res == "test"
+ mock_status.assert_called_once_with("Loading \"name\" profile from {}".format(bootstrap.PROFILES_FILE))
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.utils.detect_cloud')
+ @mock.patch('os.uname')
+ def test_detect_platform_s390(self, mock_uname, mock_cloud, mock_status):
+ mock_uname.return_value = mock.Mock(machine="s390")
+ res = self.ctx_inst.detect_platform()
+ self.assertEqual(res, bootstrap.Context.S390_PROFILE_NAME)
+ mock_uname.assert_called_once_with()
+ mock_cloud.assert_not_called()
+ mock_status.assert_called_once_with("Detected \"{}\" platform".format(res))
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.utils.detect_cloud')
+ @mock.patch('os.uname')
+ def test_detect_platform(self, mock_uname, mock_cloud, mock_status):
+ mock_uname.return_value = mock.Mock(machine="xxx")
+ mock_cloud.return_value = "azure"
+ res = self.ctx_inst.detect_platform()
+ self.assertEqual(res, "azure")
+ mock_uname.assert_called_once_with()
+ mock_cloud.assert_called_once_with()
+ mock_status.assert_called_once_with("Detected \"{}\" platform".format(res))
+
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.bootstrap.Context.detect_platform')
+ def test_load_profiles_file_not_exist(self, mock_platform, mock_exists):
+ mock_platform.return_value = "s390"
+ mock_exists.return_value = False
+ self.ctx_inst.load_profiles()
+ mock_platform.assert_called_once_with()
+ mock_exists.assert_called_once_with(bootstrap.PROFILES_FILE)
+
+ @mock.patch('yaml.load')
+ @mock.patch('builtins.open', new_callable=mock.mock_open, read_data="")
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.bootstrap.Context.detect_platform')
+ def test_load_profiles_file_empty(self, mock_platform, mock_exists, mock_open_file, mock_load):
+ mock_platform.return_value = "s390"
+ mock_exists.return_value = True
+ mock_load.return_value = ""
+ self.ctx_inst.load_profiles()
+ mock_platform.assert_called_once_with()
+ mock_exists.assert_called_once_with(bootstrap.PROFILES_FILE)
+ mock_open_file.assert_called_once_with(bootstrap.PROFILES_FILE)
+ mock_load.assert_called_once_with(mock_open_file.return_value, Loader=yaml.SafeLoader)
+
+ @mock.patch('crmsh.bootstrap.Context.load_specific_profile')
+ @mock.patch('yaml.load')
+ @mock.patch('builtins.open', new_callable=mock.mock_open, read_data="")
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.bootstrap.Context.detect_platform')
+ def test_load_profiles_file(self, mock_platform, mock_exists, mock_open_file, mock_load, mock_load_specific):
+ mock_platform.return_value = "s390"
+ mock_exists.return_value = True
+ mock_load.return_value = "data"
+ mock_load_specific.side_effect = [
+ {"name": "xin", "age": 18},
+ {"name": "wang"}
+ ]
+
+ self.ctx_inst.load_profiles()
+ assert self.ctx_inst.profiles_dict == {"name": "wang", "age": 18}
+
+ mock_platform.assert_called_once_with()
+ mock_exists.assert_called_once_with(bootstrap.PROFILES_FILE)
+ mock_open_file.assert_called_once_with(bootstrap.PROFILES_FILE)
+ mock_load.assert_called_once_with(mock_open_file.return_value, Loader=yaml.SafeLoader)
+ mock_load_specific.assert_has_calls([
+ mock.call(bootstrap.Context.DEFAULT_PROFILE_NAME),
+ mock.call("s390")
+ ])
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_without_args_without_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = None
+ context = bootstrap.Context()
+ context.cluster_node = None
+ context.user_at_node_list = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_without_args_with_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = 'alice'
+ context = bootstrap.Context()
+ context.cluster_node = None
+ context.user_at_node_list = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_cluster_node_without_user_without_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = None
+ context = bootstrap.Context()
+ context.cluster_node = 'node1'
+ context.user_at_node_list = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_cluster_node_with_user_without_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = None
+ context = bootstrap.Context()
+ context.cluster_node = 'alice@node1'
+ context.user_at_node_list = None
+ with self.assertRaises(ValueError):
+ context.initialize_user()
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_cluster_node_without_user_with_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = 'bob'
+ context = bootstrap.Context()
+ context.cluster_node = 'node1'
+ context.user_at_node_list = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_cluster_node_with_user_with_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = 'bob'
+ context = bootstrap.Context()
+ context.cluster_node = 'alice@node1'
+ context.user_at_node_list = None
+ context.initialize_user()
+ self.assertEqual('bob', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_node_list_without_user_without_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = None
+ context = bootstrap.Context()
+ context.user_at_node_list = ['node1', 'node2']
+ context.cluster_node = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_node_list_with_user_without_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = None
+ context = bootstrap.Context()
+ context.user_at_node_list = ['alice@node1', 'alice@node2']
+ context.cluster_node = None
+ with self.assertRaises(ValueError):
+ context.initialize_user()
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_node_list_without_user_with_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = 'bob'
+ context = bootstrap.Context()
+ context.user_at_node_list = ['node1', 'node2']
+ context.cluster_node = None
+ context.initialize_user()
+ self.assertEqual('root', context.current_user)
+
+ @mock.patch('crmsh.userdir.get_sudoer')
+ @mock.patch('crmsh.userdir.getuser')
+ def test_initialize_user_node_list_with_user_with_sudoer(self, mock_getuser: mock.MagicMock, mock_get_sudoer: mock.MagicMock):
+ mock_getuser.return_value = 'root'
+ mock_get_sudoer.return_value = 'bob'
+ context = bootstrap.Context()
+ context.user_at_node_list = ['alice@node1', 'alice@node2']
+ context.cluster_node = None
+ context.initialize_user()
+ self.assertEqual('bob', context.current_user)
+
+
+class TestBootstrap(unittest.TestCase):
+ """
+ Unit tests for crmsh/bootstrap.py
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.qdevice_with_ip = qdevice.QDevice("10.10.10.123")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.parallax.parallax_call')
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ @mock.patch('crmsh.sbd.SBDTimeout.is_sbd_delay_start')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_start_pacemaker(self, mock_installed, mock_enabled, mock_delay_start, mock_start, mock_parallax_call):
+ bootstrap._context = None
+ mock_installed.return_value = True
+ mock_enabled.return_value = True
+ mock_delay_start.return_value = True
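+ # SBD_DELAY_START is enabled, so a systemd drop-in unsetting it is pushed to every node
+ # first; corosync is then started per node and pacemaker once for the whole node_list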
+ node_list = ["node1", "node2", "node3", "node4", "node5", "node6"]
+ bootstrap.start_pacemaker(node_list)
+ mock_start.assert_has_calls([
+ mock.call("corosync.service", remote_addr="node1"),
+ mock.call("corosync.service", remote_addr="node2"),
+ mock.call("corosync.service", remote_addr="node3"),
+ mock.call("corosync.service", remote_addr="node4"),
+ mock.call("corosync.service", remote_addr="node5"),
+ mock.call("corosync.service", remote_addr="node6"),
+ mock.call("pacemaker.service", enable=False, node_list=node_list)
+ ])
+ mock_parallax_call.assert_has_calls([
+ mock.call(node_list, 'mkdir -p /run/systemd/system/sbd.service.d/'),
+ mock.call(node_list, "echo -e '[Service]\nUnsetEnvironment=SBD_DELAY_START' > /run/systemd/system/sbd.service.d/sbd_delay_start_disabled.conf"),
+ mock.call(node_list, "systemctl daemon-reload"),
+ ])
+
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ def test_init_ssh(self, mock_start_service, mock_config_ssh):
+ bootstrap._context = mock.Mock(current_user="alice", user_at_node_list=[], use_ssh_agent=False)
+ bootstrap.init_ssh()
+ mock_start_service.assert_called_once_with("sshd.service", enable=True)
+ mock_config_ssh.assert_has_calls([
+ mock.call("alice")
+ ])
+
+ @mock.patch('crmsh.userdir.gethomedir')
+ def test_key_files(self, mock_gethome):
+ mock_gethome.return_value = "/root"
+ expected_res = {"private": "/root/.ssh/id_rsa", "public": "/root/.ssh/id_rsa.pub", "authorized": "/root/.ssh/authorized_keys"}
+ self.assertEqual(bootstrap.key_files("root"), expected_res)
+ mock_gethome.assert_called_once_with("root")
+
+ @mock.patch('builtins.open')
+ def test_is_nologin(self, mock_open_file):
+ data = "hacluster:x:90:90:heartbeat processes:/var/lib/heartbeat/cores/hacluster:/sbin/nologin"
+ mock_open_file.return_value = mock.mock_open(read_data=data).return_value
+ assert bootstrap.is_nologin("hacluster") is not None
+ mock_open_file.assert_called_once_with("/etc/passwd")
+
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.is_nologin')
+ def test_change_user_shell_return(self, mock_nologin, mock_status, mock_confirm):
+ bootstrap._context = mock.Mock(yes_to_all=False)
+ mock_nologin.return_value = True
+ mock_confirm.return_value = False
+
+ bootstrap.change_user_shell("hacluster")
+
+ mock_nologin.assert_called_once_with("hacluster", None)
+ mock_confirm.assert_called_once_with("Continue?")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.is_nologin')
+ def test_change_user_shell(self, mock_nologin, mock_invoke):
+ bootstrap._context = mock.Mock(yes_to_all=True)
+ mock_nologin.return_value = True
+
+ bootstrap.change_user_shell("hacluster")
+
+ mock_nologin.assert_called_once_with("hacluster", None)
+ mock_invoke.assert_called_once_with("usermod -s /bin/bash hacluster", None)
+
+ @mock.patch('crmsh.sh.LocalShell.su_subprocess_run')
+ def test_generate_ssh_key_pair_on_remote(self, mock_su: mock.MagicMock):
+ mock_su.return_value = mock.Mock(returncode=0, stdout=b'')
+ bootstrap.generate_ssh_key_pair_on_remote('local_sudoer', 'remote_host', 'remote_sudoer', 'remote_user')
+ mock_su.assert_has_calls([
+ mock.call(
+ 'local_sudoer',
+ 'ssh -o StrictHostKeyChecking=no remote_sudoer@remote_host sudo -H -u remote_user /bin/sh',
+ input='''
+[ -f ~/.ssh/id_rsa ] || ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -C "Cluster internal on $(hostname)" -N ''
+[ -f ~/.ssh/id_rsa.pub ] || ssh-keygen -y -f ~/.ssh/id_rsa > ~/.ssh/id_rsa.pub
+'''.encode('utf-8'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ ),
+ mock.call(
+ 'local_sudoer',
+ 'ssh -o StrictHostKeyChecking=no remote_sudoer@remote_host sudo -H -u remote_user /bin/sh',
+ input='cat ~/.ssh/id_rsa.pub'.encode('utf-8'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ ),
+ ])
+
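+ # NOTE: the leading underscore keeps unittest from collecting this legacy test;
+ # it covered the old key-file flow, apparently replaced by the ssh_key-based
+ # test_configure_ssh_key below.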
+ @mock.patch('crmsh.bootstrap.append_unique')
+ @mock.patch('crmsh.sh.LocalShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.utils.detect_file')
+ @mock.patch('crmsh.bootstrap.key_files')
+ @mock.patch('crmsh.bootstrap.change_user_shell')
+ def _test_configure_ssh_key(self, mock_change_shell, mock_key_files, mock_detect, mock_su, mock_append_unique):
+ mock_key_files.return_value = {"private": "/test/.ssh/id_rsa", "public": "/test/.ssh/id_rsa.pub", "authorized": "/test/.ssh/authorized_keys"}
+ mock_detect.side_effect = [True, True, False]
+
+ bootstrap.configure_ssh_key("test")
+
+ mock_change_shell.assert_called_once_with("test")
+ mock_key_files.assert_called_once_with("test")
+ mock_detect.assert_has_calls([
+ mock.call("/test/.ssh/id_rsa"),
+ mock.call("/test/.ssh/id_rsa.pub"),
+ mock.call("/test/.ssh/authorized_keys")
+ ])
+ mock_append_unique.assert_called_once_with("/test/.ssh/id_rsa.pub", "/test/.ssh/authorized_keys", "test")
+ mock_su.assert_called_once_with('test', 'touch /test/.ssh/authorized_keys')
+
+ @mock.patch('crmsh.ssh_key.AuthorizedKeyManager.add')
+ @mock.patch('crmsh.ssh_key.KeyFileManager.ensure_key_pair_exists_for_user')
+ def test_configure_ssh_key(self, mock_ensure_key_pair, mock_add):
+ public_key = crmsh.ssh_key.InMemoryPublicKey('foo')
+ mock_ensure_key_pair.return_value = (True, [public_key])
+ bootstrap.configure_ssh_key('alice')
+ mock_ensure_key_pair.assert_called_once_with(None, 'alice')
+ mock_add.assert_called_once_with(None, 'alice', public_key)
+
+ @mock.patch('crmsh.bootstrap.append_to_remote_file')
+ @mock.patch('crmsh.utils.check_file_content_included')
+ def test_append_unique_remote(self, mock_check, mock_append):
+ mock_check.return_value = False
+ bootstrap.append_unique("fromfile", "tofile", user="root", remote="node1", from_local=True)
+ mock_check.assert_called_once_with("fromfile", "tofile", remote="node1", source_local=True)
+ mock_append.assert_called_once_with("fromfile", "root", "node1", "tofile")
+
+ @mock.patch('crmsh.bootstrap.append')
+ @mock.patch('crmsh.utils.check_file_content_included')
+ def test_append_unique(self, mock_check, mock_append):
+ mock_check.return_value = False
+ bootstrap.append_unique("fromfile", "tofile")
+ mock_check.assert_called_once_with("fromfile", "tofile", remote=None, source_local=False)
+ mock_append.assert_called_once_with("fromfile", "tofile", remote=None)
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_append_to_remote_file(self, mock_run):
+ bootstrap.append_to_remote_file("fromfile", "root", "node1", "tofile")
+ cmd = "cat fromfile | ssh {} root@node1 'cat >> tofile'".format(constants.SSH_OPTION)
+ mock_run.assert_called_once_with(cmd)
+
+ @mock.patch('crmsh.utils.fatal')
+ def test_join_ssh_no_seed_host(self, mock_error):
+ mock_error.side_effect = ValueError
+ with self.assertRaises(ValueError):
+ bootstrap.join_ssh(None, None)
+ mock_error.assert_called_once_with("No existing IP/hostname specified (use -c option)")
+
+ @mock.patch('crmsh.bootstrap.swap_public_ssh_key_for_secondary_user')
+ @mock.patch('crmsh.bootstrap.change_user_shell')
+ @mock.patch('crmsh.sh.LocalShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.swap_public_ssh_key')
+ @mock.patch('crmsh.utils.ssh_copy_id_no_raise')
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ def test_join_ssh(
+ self,
+ mock_start_service, mock_config_ssh, mock_ssh_copy_id, mock_swap, mock_invoke, mock_change, mock_swap_2,
+ ):
+ bootstrap._context = mock.Mock(current_user="bob", default_nic_list=["eth1"], use_ssh_agent=False)
+ mock_invoke.return_value = ''
+ mock_swap.return_value = None
+ mock_ssh_copy_id.return_value = 0
+
+ bootstrap.join_ssh("node1", "alice")
+
+ mock_start_service.assert_called_once_with("sshd.service", enable=True)
+ mock_config_ssh.assert_has_calls([
+ mock.call("bob"),
+ mock.call("hacluster"),
+ ])
+ mock_ssh_copy_id.assert_called_once_with("bob", "alice", "node1")
+ mock_swap.assert_called_once_with("node1", "bob", "alice", "bob", "alice", add=True)
+ mock_invoke.assert_called_once_with(
+ "bob",
+ "ssh {} alice@node1 sudo crm cluster init -i eth1 ssh_remote".format(constants.SSH_OPTION),
+ )
+ mock_swap_2.assert_called_once()
+ args, kwargs = mock_swap_2.call_args
+ self.assertEqual(3, len(args))
+ self.assertEqual('node1', args[1])
+ self.assertEqual('hacluster', args[2])
+
+ @mock.patch('crmsh.ssh_key.AuthorizedKeyManager.add')
+ @mock.patch('crmsh.ssh_key.KeyFile.public_key')
+ @mock.patch('crmsh.ssh_key.KeyFileManager.ensure_key_pair_exists_for_user')
+ @mock.patch('crmsh.ssh_key.KeyFileManager.list_public_key_for_user')
+ @mock.patch('logging.Logger.info')
+ def test_swap_public_ssh_key_for_secondary_user(
+ self,
+ mock_log_info,
+ mock_list_public_key_for_user,
+ mock_ensure_key_pair_exists_for_user,
+ mock_public_key,
+ mock_authorized_key_manager_add,
+ ):
+ mock_shell = mock.Mock(
+ crmsh.sh.ClusterShell,
+ local_shell=mock.Mock(crmsh.sh.LocalShell),
+ user_of_host=mock.Mock(crmsh.user_of_host.UserOfHost),
+ )
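+ # alice already has local key files; ensure_key_pair_exists_for_user returning True
+ # means a fresh pair was generated on node1, which triggers the log assertion below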
+ mock_list_public_key_for_user.return_value = ['~/.ssh/id_rsa', '~/.ssh/id_ed25519']
+ mock_ensure_key_pair_exists_for_user.return_value = (True, [
+ crmsh.ssh_key.InMemoryPublicKey('foo'),
+ crmsh.ssh_key.InMemoryPublicKey('bar'),
+ ])
+ mock_public_key.return_value = 'public_key'
+ crmsh.bootstrap.swap_public_ssh_key_for_secondary_user(mock_shell, 'node1', 'alice')
+ mock_list_public_key_for_user.assert_called_once_with(None, 'alice')
+ mock_ensure_key_pair_exists_for_user.assert_called_once_with('node1', 'alice')
+ mock_authorized_key_manager_add.assert_has_calls([
+ mock.call(None, 'alice', crmsh.ssh_key.InMemoryPublicKey('foo')),
+ mock.call('node1', 'alice', crmsh.ssh_key.KeyFile('~/.ssh/id_rsa')),
+ ])
+ mock_log_info.assert_called_with("A new ssh keypair is generated for user %s@%s.", 'alice', 'node1')
+
+ @mock.patch('crmsh.bootstrap.change_user_shell')
+ @mock.patch('crmsh.sh.LocalShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.swap_public_ssh_key')
+ @mock.patch('crmsh.utils.ssh_copy_id_no_raise')
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ def test_join_ssh_bad_credential(self, mock_start_service, mock_config_ssh, mock_ssh_copy_id, mock_swap, mock_invoke, mock_change):
+ bootstrap._context = mock.Mock(current_user="bob", default_nic_list=["eth1"], use_ssh_agent=False)
+ mock_invoke.return_value = ''
+ mock_swap.return_value = None
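+ # exit status 255 means ssh itself failed, so join_ssh must raise before any key swap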
+ mock_ssh_copy_id.return_value = 255
+
+ with self.assertRaises(ValueError):
+ bootstrap.join_ssh("node1", "alice")
+
+ mock_start_service.assert_called_once_with("sshd.service", enable=True)
+ mock_config_ssh.assert_has_calls([
+ mock.call("bob"),
+ ])
+ mock_ssh_copy_id.assert_called_once_with("bob", "alice", "node1")
+ mock_swap.assert_not_called()
+ mock_invoke.assert_not_called()
+
+ @mock.patch('crmsh.bootstrap.import_ssh_key')
+ @mock.patch('crmsh.bootstrap.export_ssh_key_non_interactive')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ def test_swap_public_ssh_key_exception(self, mock_check_passwd, mock_warn, mock_export_ssh_key, mock_import_ssh):
+ mock_check_passwd.return_value = False
+ mock_import_ssh.side_effect = ValueError("Can't get the remote id_rsa.pub from {}: {}")
+
+ bootstrap.swap_public_ssh_key("node1", "bob", "bob", "alice", "alice")
+
+ mock_check_passwd.assert_called_once_with("bob", "bob", "node1")
+ mock_import_ssh.assert_called_once_with("bob", "bob", "alice", "node1", "alice")
+ mock_warn.assert_called_once_with(mock_import_ssh.side_effect)
+
+ @mock.patch('crmsh.bootstrap.import_ssh_key')
+ @mock.patch('crmsh.bootstrap.export_ssh_key_non_interactive')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ def test_swap_public_ssh_key(self, mock_check_passwd, mock_export_ssh, mock_import_ssh):
+ mock_check_passwd.return_value = True
+
+ bootstrap.swap_public_ssh_key("node1", "bob", "bob", "alice", "alice")
+
+ mock_check_passwd.assert_called_once_with("bob", "bob", "node1")
+ mock_export_ssh.assert_called_once_with("bob", "bob", "node1", "alice", "alice")
+ mock_import_ssh.assert_called_once_with("bob", "bob", "alice", "node1", "alice")
+
+ @mock.patch('crmsh.utils.this_node')
+ def test_bootstrap_add_return(self, mock_this_node):
+ ctx = mock.Mock(user_at_node_list=[], use_ssh_agent=False)
+ bootstrap.bootstrap_add(ctx)
+ mock_this_node.assert_not_called()
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.utils.this_node')
+ def test_bootstrap_add(self, mock_this_node, mock_info, mock_run):
+ ctx = mock.Mock(current_user="alice", user_at_node_list=["bob@node2", "carol@node3"], nic_list=["eth1"], use_ssh_agent=False)
+ mock_this_node.return_value = "node1"
+ bootstrap.bootstrap_add(ctx)
+ mock_info.assert_has_calls([
+ mock.call("Adding node node2 to cluster"),
+ mock.call("Running command on node2: crm cluster join -y -i eth1 -c alice@node1"),
+ mock.call("Adding node node3 to cluster"),
+ mock.call("Running command on node3: crm cluster join -y -i eth1 -c alice@node1")
+ ])
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_setup_passwordless_with_other_nodes_failed_fetch_nodelist(self, mock_run, mock_error):
+ bootstrap._context = mock.Mock(current_user="carol", use_ssh_agent=False)
+ mock_run.return_value = (1, None, None)
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.setup_passwordless_with_other_nodes("node1", "alice")
+
+ mock_run.assert_called_once_with('node1', 'crm_node -l')
+ mock_error.assert_called_once_with("Can't fetch cluster nodes list from node1: None")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.bootstrap._fetch_core_hosts')
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_setup_passwordless_with_other_nodes_failed_fetch_hostname(
+ self,
+ mock_run,
+ mock_fetch_core_hosts,
+ mock_host_user_config_class,
+ mock_error,
+ ):
+ bootstrap._context = mock.Mock(current_user="carol", use_ssh_agent=False)
+ out_node_list = """1 node1 member
+ 2 node2 member"""
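+ # 'crm_node -l' succeeds; the follow-up 'hostname' query fails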
+ mock_run.side_effect = [
+ (0, out_node_list, None),
+ (1, None, None)
+ ]
+ mock_fetch_core_hosts.return_value = (["alice", "bob"], ["node1", "node2"])
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.setup_passwordless_with_other_nodes("node1", "alice")
+
+ mock_run.assert_has_calls([
+ mock.call('node1', 'crm_node -l'),
+ mock.call('node1', 'hostname'),
+ ])
+ mock_error.assert_called_once_with("Can't fetch hostname of node1: None")
+
+ @mock.patch('crmsh.bootstrap.swap_key_for_hacluster')
+ @mock.patch('crmsh.bootstrap.change_user_shell')
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.bootstrap._fetch_core_hosts')
+ @mock.patch('crmsh.utils.ssh_copy_id')
+ @mock.patch('crmsh.utils.user_of')
+ @mock.patch('crmsh.bootstrap.swap_public_ssh_key')
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_setup_passwordless_with_other_nodes(
+ self,
+ mock_run,
+ mock_swap,
+ mock_userof,
+ mock_ssh_copy_id: mock.MagicMock,
+ mock_fetch_core_hosts,
+ mock_host_user_config_class,
+ mock_change_shell,
+ mock_swap_hacluster
+ ):
+ bootstrap._context = mock.Mock(current_user="carol", use_ssh_agent=False)
+ mock_fetch_core_hosts.return_value = (["alice", "bob"], ["node1", "node2"])
+ mock_userof.return_value = "bob"
+ out_node_list = """1 node1 member
+ 2 node2 member"""
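+ # 'crm_node -l' returns the member list; 'hostname' resolves the seed node to "node1"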
+ mock_run.side_effect = [
+ (0, out_node_list, None),
+ (0, "node1", None)
+ ]
+
+ bootstrap.setup_passwordless_with_other_nodes("node1", "alice")
+
+ mock_run.assert_has_calls([
+ mock.call('node1', 'crm_node -l'),
+ mock.call('node1', 'hostname'),
+ ])
+ mock_userof.assert_called_once_with("node2")
+ mock_ssh_copy_id.assert_has_calls([
+ mock.call('carol', 'bob', 'node2')
+ ])
+ mock_swap.assert_has_calls([
+ mock.call('node2', "carol", "bob", "carol", "bob"),
+ mock.call('node2', 'hacluster', 'hacluster', 'carol', 'bob', add=True)
+ ])
+
+ @mock.patch('crmsh.userdir.getuser')
+ @mock.patch('crmsh.bootstrap.key_files')
+ @mock.patch('builtins.open')
+ @mock.patch('crmsh.bootstrap.append')
+ @mock.patch('os.path.join')
+ @mock.patch('os.path.exists')
+ def test_init_ssh_remote_no_sshkey(self, mock_exists, mock_join, mock_append, mock_open_file, mock_key_files, mock_getuser):
+ mock_getuser.return_value = "alice"
+ mock_key_files.return_value = {"private": "/home/alice/.ssh/id_rsa", "public": "/home/alice/.ssh/id_rsa.pub", "authorized": "/home/alice/.ssh/authorized_keys"}
+ mock_exists.side_effect = [False, True, False, False, False]
+ mock_join.side_effect = ["/home/alice/.ssh/id_rsa",
+ "/home/alice/.ssh/id_dsa",
+ "/home/alice/.ssh/id_ecdsa",
+ "/home/alice/.ssh/id_ed25519"]
+ mock_open_file.side_effect = [
+ mock.mock_open().return_value,
+ mock.mock_open(read_data="data1 data2").return_value,
+ mock.mock_open(read_data="data1111").return_value
+ ]
+
+ bootstrap.init_ssh_remote()
+
+ mock_getuser.assert_called_once_with()
+ mock_key_files.assert_called_once_with("alice")
+
+ mock_open_file.assert_has_calls([
+ mock.call("/home/alice/.ssh/authorized_keys", 'w'),
+ mock.call("/home/alice/.ssh/authorized_keys", "r+"),
+ mock.call("/home/alice/.ssh/id_rsa.pub")
+ ])
+ mock_exists.assert_has_calls([
+ mock.call("/home/alice/.ssh/authorized_keys"),
+ mock.call("/home/alice/.ssh/id_rsa"),
+ mock.call("/home/alice/.ssh/id_dsa"),
+ mock.call("/home/alice/.ssh/id_ecdsa"),
+ mock.call("/home/alice/.ssh/id_ed25519"),
+ ])
+ mock_append.assert_called_once_with("/home/alice/.ssh/id_rsa.pub", "/home/alice/.ssh/authorized_keys")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_get_node_canonical_hostname(self, mock_run):
+ mock_run.return_value = (0, "Node1", None)
+
+ peer_node = bootstrap.get_node_canonical_hostname('node1')
+ self.assertEqual('Node1', peer_node)
+ mock_run.assert_called_once_with('node1', 'crm_node --name')
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_get_node_canonical_hostname_error(self, mock_run, mock_error):
+ mock_run.return_value = (1, None, "error")
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.get_node_canonical_hostname('node1')
+
+ mock_run.assert_called_once_with("node1", "crm_node --name")
+ mock_error.assert_called_once_with("error")
+
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser.is_node_online')
+ def test_is_online_local_offline(self, mock_is_online, mock_get_hostname, mock_this_node):
+ bootstrap._context = mock.Mock(cluster_node='node2')
+ mock_this_node.return_value = "node1"
+ mock_is_online.return_value = False
+
+ assert bootstrap.is_online() is False
+
+ mock_this_node.assert_called_once_with()
+ mock_get_hostname.assert_not_called()
+ mock_is_online.assert_called_once_with("node1")
+
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser.is_node_online')
+ def test_is_online_on_init_node(self, mock_is_online, mock_get_hostname, mock_this_node):
+ bootstrap._context = mock.Mock(cluster_node=None)
+ mock_this_node.return_value = "node1"
+ mock_is_online.return_value = True
+
+ assert bootstrap.is_online() is True
+
+ mock_this_node.assert_called_once_with()
+ mock_get_hostname.assert_not_called()
+ mock_is_online.assert_called_once_with("node1")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.stop_service')
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.corosync.conf')
+ @mock.patch('shutil.copy')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser.is_node_online')
+ def test_is_online_peer_offline(self, mock_is_online, mock_get_hostname, mock_this_node,
+ mock_copy, mock_corosync_conf, mock_csync2, mock_stop_service, mock_error):
+ bootstrap._context = mock.Mock(cluster_node='node1')
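+ # the local node reports online but the peer does not, so the original
+ # corosync.conf is restored and corosync stopped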
+ mock_is_online.side_effect = [True, False]
+ bootstrap.COROSYNC_CONF_ORIG = "/tmp/crmsh_tmpfile"
+ mock_this_node.return_value = "node2"
+ mock_get_hostname.return_value = "node1"
+ mock_corosync_conf.side_effect = [ "/etc/corosync/corosync.conf",
+ "/etc/corosync/corosync.conf"]
+
+ bootstrap.is_online()
+
+ mock_this_node.assert_called_once_with()
+ mock_get_hostname.assert_called_once_with('node1')
+ mock_corosync_conf.assert_has_calls([
+ mock.call(),
+ mock.call()
+ ])
+ mock_copy.assert_called_once_with(bootstrap.COROSYNC_CONF_ORIG, "/etc/corosync/corosync.conf")
+ mock_csync2.assert_called_once_with("/etc/corosync/corosync.conf")
+ mock_stop_service.assert_called_once_with("corosync")
+ mock_error.assert_called_once_with("Cannot see peer node \"node1\", please check the communication IP")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.stop_service')
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.corosync.conf')
+ @mock.patch('shutil.copy')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser.is_node_online')
+ def test_is_online_both_online(self, mock_is_online, mock_get_hostname, mock_this_node,
+ mock_copy, mock_corosync_conf, mock_csync2, mock_stop_service, mock_error):
+ bootstrap._context = mock.Mock(cluster_node='node2')
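+ # both the local node and the peer report online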
+ mock_is_online.side_effect = [True, True]
+ mock_this_node.return_value = "node2"
+ mock_get_hostname.return_value = "node2"
+
+ assert bootstrap.is_online() is True
+
+ mock_this_node.assert_called_once_with()
+ mock_get_hostname.assert_called_once_with('node2')
+ mock_corosync_conf.assert_not_called()
+ mock_copy.assert_not_called()
+ mock_csync2.assert_not_called()
+ mock_stop_service.assert_not_called()
+ mock_error.assert_not_called()
+
+ @mock.patch('crmsh.bootstrap.invokerc')
+ @mock.patch('crmsh.bootstrap.invoke')
+ def test_csync2_update_no_conflicts(self, mock_invoke, mock_invokerc):
+ mock_invokerc.return_value = True
+ bootstrap.csync2_update("/etc/corosync.conf")
+ mock_invoke.assert_called_once_with("csync2 -rm /etc/corosync.conf")
+ mock_invokerc.assert_called_once_with("csync2 -rxv /etc/corosync.conf")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.bootstrap.invokerc')
+ @mock.patch('crmsh.bootstrap.invoke')
+ def test_csync2_update(self, mock_invoke, mock_invokerc, mock_warn):
+ mock_invokerc.side_effect = [False, False]
+ bootstrap.csync2_update("/etc/corosync.conf")
+ mock_invoke.assert_has_calls([
+ mock.call("csync2 -rm /etc/corosync.conf"),
+ mock.call("csync2 -rf /etc/corosync.conf")
+ ])
+ mock_invokerc.assert_has_calls([
+ mock.call("csync2 -rxv /etc/corosync.conf"),
+ mock.call("csync2 -rxv /etc/corosync.conf")
+ ])
+ mock_warn.assert_called_once_with("/etc/corosync.conf was not synced")
+
+ @mock.patch('crmsh.utils.InterfacesInfo')
+ def test_init_network(self, mock_interfaces):
+ mock_interfaces_inst = mock.Mock()
+ mock_interfaces.return_value = mock_interfaces_inst
+ mock_interfaces_inst.get_default_nic_list_from_route.return_value = ["eth0", "eth1"]
+ bootstrap._context = mock.Mock(ipv6=False, second_heartbeat=False, nic_list=["eth0", "eth1"], default_nic_list=["eth0", "eth1"])
+
+ bootstrap.init_network()
+
+ mock_interfaces.assert_called_once_with(False, False, bootstrap._context.nic_list)
+ mock_interfaces_inst.get_interfaces_info.assert_called_once_with()
+ mock_interfaces_inst.get_default_nic_list_from_route.assert_called_once_with()
+ mock_interfaces_inst.get_default_ip_list.assert_called_once_with()
+
+ @mock.patch('crmsh.service_manager.ServiceManager.disable_service')
+ @mock.patch('logging.Logger.info')
+ def test_init_qdevice_no_config(self, mock_status, mock_disable):
+ bootstrap._context = mock.Mock(qdevice_inst=None)
+ bootstrap.init_qdevice()
+ mock_status.assert_not_called()
+ mock_disable.assert_called_once_with("corosync-qdevice.service")
+
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.user_of_host.UserOfHost.instance')
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('crmsh.utils.ssh_copy_id_no_raise')
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ @mock.patch('logging.Logger.info')
+ def test_init_qdevice_copy_ssh_key_failed(
+ self,
+ mock_status, mock_check_ssh_passwd_need,
+ mock_configure_ssh_key, mock_ssh_copy_id, mock_list_nodes, mock_user_of_host,
+ mock_host_user_config_class,
+ ):
+ mock_list_nodes.return_value = []
+ bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, current_user="bob")
+ mock_check_ssh_passwd_need.return_value = True
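+ # ssh-copy-id to the qnetd host fails (exit 255), so init_qdevice must raise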
+ mock_ssh_copy_id.return_value = 255
+ mock_user_of_host.return_value = mock.MagicMock(crmsh.user_of_host.UserOfHost)
+ mock_user_of_host.return_value.user_pair_for_ssh.return_value = "bob", "bob"
+ mock_user_of_host.return_value.use_ssh_agent.return_value = False
+
+ with self.assertRaises(ValueError):
+ bootstrap.init_qdevice()
+
+ mock_status.assert_has_calls([
+ mock.call("Configure Qdevice/Qnetd:"),
+ ])
+ mock_check_ssh_passwd_need.assert_called_once_with("bob", "bob", "10.10.10.123")
+ mock_configure_ssh_key.assert_called_once_with('bob')
+ mock_ssh_copy_id.assert_called_once_with('bob', 'bob', '10.10.10.123')
+
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.user_of_host.UserOfHost.instance')
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ @mock.patch('logging.Logger.info')
+ def test_init_qdevice_already_configured(
+ self,
+ mock_status, mock_ssh, mock_configure_ssh_key,
+ mock_qdevice_configured, mock_confirm, mock_list_nodes, mock_user_of_host,
+ mock_host_user_config_class,
+ ):
+ mock_list_nodes.return_value = []
+ bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, current_user="bob")
+ mock_ssh.return_value = False
+ mock_user_of_host.return_value = mock.MagicMock(crmsh.user_of_host.UserOfHost)
+ mock_user_of_host.return_value.user_pair_for_ssh.return_value = "bob", "bob"
+ mock_user_of_host.return_value.use_ssh_agent.return_value = False
+ mock_qdevice_configured.return_value = True
+ mock_confirm.return_value = False
+ self.qdevice_with_ip.start_qdevice_service = mock.Mock()
+
+ bootstrap.init_qdevice()
+
+ mock_status.assert_called_once_with("Configure Qdevice/Qnetd:")
+ mock_ssh.assert_called_once_with("bob", "bob", "10.10.10.123")
+ mock_configure_ssh_key.assert_not_called()
+ mock_host_user_config_class.return_value.save_remote.assert_called_once_with(mock_list_nodes.return_value)
+ mock_qdevice_configured.assert_called_once_with()
+ mock_confirm.assert_called_once_with("Qdevice is already configured - overwrite?")
+ self.qdevice_with_ip.start_qdevice_service.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.user_of_host.UserOfHost.instance')
+ @mock.patch('crmsh.bootstrap.adjust_priority_fencing_delay')
+ @mock.patch('crmsh.bootstrap.adjust_priority_in_rsc_defaults')
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ @mock.patch('crmsh.bootstrap.configure_ssh_key')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ @mock.patch('logging.Logger.info')
+ def test_init_qdevice(self, mock_info, mock_ssh, mock_configure_ssh_key, mock_qdevice_configured,
+ mock_this_node, mock_list_nodes, mock_adjust_priority, mock_adjust_fence_delay,
+ mock_user_of_host, mock_host_user_config_class):
+ bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, current_user="bob")
+ mock_this_node.return_value = "192.0.2.100"
+ mock_list_nodes.return_value = []
+ mock_ssh.return_value = False
+ mock_user_of_host.return_value = mock.MagicMock(crmsh.user_of_host.UserOfHost)
+ mock_user_of_host.return_value.user_pair_for_ssh.return_value = "bob", "bob"
+ mock_user_of_host.return_value.use_ssh_agent.return_value = False
+ mock_qdevice_configured.return_value = False
+ self.qdevice_with_ip.set_cluster_name = mock.Mock()
+ self.qdevice_with_ip.valid_qnetd = mock.Mock()
+ self.qdevice_with_ip.config_and_start_qdevice = mock.Mock()
+
+ bootstrap.init_qdevice()
+
+ mock_info.assert_called_once_with("Configure Qdevice/Qnetd:")
+ mock_ssh.assert_called_once_with("bob", "bob", "10.10.10.123")
+ mock_host_user_config_class.return_value.add.assert_has_calls([
+ mock.call('bob', '192.0.2.100'),
+ mock.call('bob', '10.10.10.123'),
+ ])
+ mock_host_user_config_class.return_value.save_remote.assert_called_once_with(mock_list_nodes.return_value)
+ mock_qdevice_configured.assert_called_once_with()
+ self.qdevice_with_ip.set_cluster_name.assert_called_once_with()
+ self.qdevice_with_ip.valid_qnetd.assert_called_once_with()
+ self.qdevice_with_ip.config_and_start_qdevice.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.utils.HostUserConfig')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_available')
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('logging.Logger.info')
+ def test_init_qdevice_service_not_available(
+ self,
+ mock_info, mock_list_nodes, mock_available,
+ mock_host_user_config_class,
+ mock_fatal,
+ ):
+ bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip)
+ mock_list_nodes.return_value = ["node1"]
+ mock_available.return_value = False
+ mock_fatal.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.init_qdevice()
+
+ mock_host_user_config_class.return_value.save_local.assert_not_called()
+ mock_host_user_config_class.return_value.save_remote.assert_not_called()
+ mock_fatal.assert_called_once_with("corosync-qdevice.service is not available on node1")
+ mock_available.assert_called_once_with("corosync-qdevice.service", "node1")
+ mock_info.assert_called_once_with("Configure Qdevice/Qnetd:")
+
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ def test_configure_qdevice_interactive_return(self, mock_prompt):
+ bootstrap._context = mock.Mock(yes_to_all=True)
+ bootstrap.configure_qdevice_interactive()
+ mock_prompt.assert_not_called()
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.confirm')
+ def test_configure_qdevice_interactive_not_confirm(self, mock_confirm, mock_info):
+ bootstrap._context = mock.Mock(yes_to_all=False)
+ mock_confirm.return_value = False
+ bootstrap.configure_qdevice_interactive()
+ mock_confirm.assert_called_once_with("Do you want to configure QDevice?")
+
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.qdevice.QDevice.check_package_installed')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.confirm')
+ def test_configure_qdevice_interactive_not_installed(self, mock_confirm, mock_info, mock_installed, mock_error):
+ bootstrap._context = mock.Mock(yes_to_all=False)
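+ # confirm QDevice setup first, then decline to retry after the missing-package error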
+ mock_confirm.side_effect = [True, False]
+ mock_installed.side_effect = ValueError("corosync-qdevice not installed")
+ bootstrap.configure_qdevice_interactive()
+ mock_confirm.assert_has_calls([
+ mock.call("Do you want to configure QDevice?"),
+ mock.call("Please install the package manually and press 'y' to continue")
+ ])
+
+ @mock.patch('crmsh.qdevice.QDevice')
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ @mock.patch('crmsh.qdevice.QDevice.check_package_installed')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.confirm')
+ def test_configure_qdevice_interactive(self, mock_confirm, mock_info, mock_installed, mock_prompt, mock_qdevice):
+ bootstrap._context = mock.Mock(yes_to_all=False)
+ mock_confirm.return_value = True
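+ # prompt answers, in order: qnetd host (with ssh user), port, algorithm,
+ # tie-breaker, TLS mode, heuristics command (none)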
+ mock_prompt.side_effect = ["alice@qnetd-node", 5403, "ffsplit", "lowest", "on", None]
+ mock_qdevice_inst = mock.Mock()
+ mock_qdevice.return_value = mock_qdevice_inst
+
+ bootstrap.configure_qdevice_interactive()
+ mock_confirm.assert_called_once_with("Do you want to configure QDevice?")
+ mock_prompt.assert_has_calls([
+ mock.call("HOST or IP of the QNetd server to be used",
+ valid_func=qdevice.QDevice.check_qnetd_addr),
+ mock.call("TCP PORT of QNetd server", default=5403,
+ valid_func=qdevice.QDevice.check_qdevice_port),
+ mock.call("QNetd decision ALGORITHM (ffsplit/lms)", default="ffsplit",
+ valid_func=qdevice.QDevice.check_qdevice_algo),
+ mock.call("QNetd TIE_BREAKER (lowest/highest/valid node id)", default="lowest",
+ valid_func=qdevice.QDevice.check_qdevice_tie_breaker),
+ mock.call("Whether using TLS on QDevice/QNetd (on/off/required)", default="on",
+ valid_func=qdevice.QDevice.check_qdevice_tls),
+ mock.call("Heuristics COMMAND to run with absolute path; For multiple commands, use \";\" to separate",
+ valid_func=qdevice.QDevice.check_qdevice_heuristics,
+ allow_empty=True)
+ ])
+ mock_qdevice.assert_called_once_with('qnetd-node', port=5403, ssh_user='alice', algo='ffsplit', tie_breaker='lowest', tls='on', cmds=None, mode=None, is_stage=False)
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_remove_qdevice_no_configured(self, mock_qdevice_configured, mock_error):
+ mock_qdevice_configured.return_value = False
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.remove_qdevice()
+
+ mock_qdevice_configured.assert_called_once_with()
+ mock_error.assert_called_once_with("No QDevice configuration in this cluster")
+
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_remove_qdevice_not_confirmed(self, mock_qdevice_configured, mock_confirm):
+ mock_qdevice_configured.return_value = True
+ mock_confirm.return_value = False
+
+ bootstrap.remove_qdevice()
+
+ mock_qdevice_configured.assert_called_once_with()
+ mock_confirm.assert_called_once_with("Removing QDevice service and configuration from cluster: Are you sure?")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.adjust_priority_fencing_delay')
+ @mock.patch('crmsh.bootstrap.adjust_priority_in_rsc_defaults')
+ @mock.patch('crmsh.qdevice.QDevice.remove_certification_files_on_qnetd')
+ @mock.patch('crmsh.qdevice.QDevice.remove_qdevice_db')
+ @mock.patch('crmsh.qdevice.QDevice.remove_qdevice_config')
+ @mock.patch('crmsh.bootstrap.update_expected_votes')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.qdevice.evaluate_qdevice_quorum_effect')
+ @mock.patch('crmsh.utils.check_all_nodes_reachable')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_remove_qdevice_reload(self, mock_qdevice_configured, mock_confirm, mock_reachable, mock_evaluate,
+ mock_status, mock_invoke, mock_status_long, mock_update_votes, mock_remove_config, mock_remove_db,
+ mock_remove_files, mock_adjust_priority, mock_adjust_fence_delay, mock_service_is_active):
+ mock_qdevice_configured.return_value = True
+ mock_confirm.return_value = True
+ mock_evaluate.return_value = qdevice.QdevicePolicy.QDEVICE_RELOAD
+ mock_service_is_active.return_value = False
+
+ bootstrap.remove_qdevice()
+
+ mock_qdevice_configured.assert_called_once_with()
+ mock_confirm.assert_called_once_with("Removing QDevice service and configuration from cluster: Are you sure?")
+ mock_reachable.assert_called_once_with()
+ mock_evaluate.assert_called_once_with(qdevice.QDEVICE_REMOVE)
+ mock_status.assert_has_calls([
+ mock.call("Disable corosync-qdevice.service"),
+ mock.call("Stopping corosync-qdevice.service")
+ ])
+ mock_invoke.assert_has_calls([
+ mock.call("crm cluster run 'systemctl disable corosync-qdevice'"),
+ mock.call("crm cluster run 'systemctl stop corosync-qdevice'"),
+ mock.call("crm cluster run 'crm corosync reload'")
+ ])
+ mock_status_long.assert_called_once_with("Removing QDevice configuration from cluster")
+ mock_update_votes.assert_called_once_with()
+ mock_remove_config.assert_called_once_with()
+ mock_remove_db.assert_called_once_with()
+
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ @mock.patch('crmsh.qdevice.QDevice')
+ @mock.patch('crmsh.corosync.get_value')
+ @mock.patch('crmsh.utils.is_qdevice_tls_on')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.corosync.conf')
+ @mock.patch('crmsh.corosync.add_nodelist_from_cmaptool')
+ @mock.patch('crmsh.corosync.is_unicast')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ def test_start_qdevice_on_join_node(self, mock_status_long, mock_is_unicast, mock_add_nodelist,
+ mock_conf, mock_csync2_update, mock_invoke, mock_qdevice_tls,
+ mock_get_value, mock_qdevice, mock_start_service):
+ mock_is_unicast.return_value = False
+ mock_qdevice_tls.return_value = True
+ mock_conf.return_value = "corosync.conf"
+ mock_get_value.return_value = "10.10.10.123"
+ mock_qdevice_inst = mock.Mock()
+ mock_qdevice.return_value = mock_qdevice_inst
+ mock_qdevice_inst.certificate_process_on_join = mock.Mock()
+
+ bootstrap.start_qdevice_on_join_node("node2")
+
+ mock_status_long.assert_called_once_with("Starting corosync-qdevice.service")
+ mock_is_unicast.assert_called_once_with()
+ mock_add_nodelist.assert_called_once_with()
+ mock_conf.assert_called_once_with()
+ mock_csync2_update.assert_called_once_with("corosync.conf")
+ mock_invoke.assert_called_once_with("crm corosync reload")
+ mock_qdevice_tls.assert_called_once_with()
+ mock_get_value.assert_called_once_with("quorum.device.net.host")
+ mock_qdevice.assert_called_once_with("10.10.10.123", cluster_node="node2")
+ mock_qdevice_inst.certificate_process_on_join.assert_called_once_with()
+ mock_start_service.assert_called_once_with("corosync-qdevice.service", enable=True)
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.log.LoggerUtils.log_only_to_file')
+ def test_invoke(self, mock_log, mock_run):
+ mock_run.return_value = (0, "output", "error")
+ res = bootstrap.invoke("cmd --option")
+ self.assertEqual(res, (True, "output", "error"))
+ mock_log.assert_has_calls([
+ mock.call('invoke: cmd --option'),
+ mock.call('stdout: output'),
+ mock.call('stderr: error')
+ ])
+
+ @mock.patch('crmsh.bootstrap.invoke')
+ def test_invokerc(self, mock_invoke):
+ mock_invoke.return_value = (True, None, None)
+ res = bootstrap.invokerc("cmd")
+ self.assertEqual(res, True)
+ mock_invoke.assert_called_once_with("cmd")
+
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('os.path.isfile')
+ def test_sync_files_to_disk(self, mock_isfile, mock_cluster_cmd):
+ bootstrap.FILES_TO_SYNC = ("file1", "file2")
+ mock_isfile.side_effect = [True, True]
+ bootstrap.sync_files_to_disk()
+ mock_isfile.assert_has_calls([mock.call("file1"), mock.call("file2")])
+ mock_cluster_cmd.assert_called_once_with("sync file1 file2")
+
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.cib_factory')
+ def test_adjust_pcmk_delay_2node(self, mock_cib_factory, mock_run, mock_debug):
+ mock_cib_factory.refresh = mock.Mock()
+ mock_cib_factory.fence_id_list_without_pcmk_delay = mock.Mock()
+ mock_cib_factory.fence_id_list_without_pcmk_delay.return_value = ["res_1"]
+ bootstrap.adjust_pcmk_delay_max(True)
+ mock_run.assert_called_once_with("crm resource param res_1 set pcmk_delay_max {}s".format(constants.PCMK_DELAY_MAX))
+
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.cib_factory')
+ def test_adjust_pcmk_delay(self, mock_cib_factory, mock_run, mock_debug):
+ mock_cib_factory.refresh = mock.Mock()
+ mock_cib_factory.fence_id_list_with_pcmk_delay = mock.Mock()
+ mock_cib_factory.fence_id_list_with_pcmk_delay.return_value = ["res_1"]
+ bootstrap.adjust_pcmk_delay_max(False)
+ mock_run.assert_called_once_with("crm resource param res_1 delete pcmk_delay_max")
+
+ @mock.patch('crmsh.sbd.SBDTimeout')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_adjust_stonith_timeout_sbd(self, mock_is_active, mock_sbd_timeout):
+ mock_is_active.return_value = True
+ mock_sbd_timeout.adjust_sbd_timeout_related_cluster_configuration = mock.Mock()
+ bootstrap.adjust_stonith_timeout()
+ mock_sbd_timeout.adjust_sbd_timeout_related_cluster_configuration.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.bootstrap.get_stonith_timeout_generally_expected')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_adjust_stonith_timeout(self, mock_is_active, mock_get_timeout, mock_set):
+ mock_is_active.return_value = False
+ mock_get_timeout.return_value = 30
+ bootstrap.adjust_stonith_timeout()
+ mock_set.assert_called_once_with("stonith-timeout", 30, conditional=True)
+
+ @mock.patch('crmsh.utils.set_property')
+ def test_adjust_priority_in_rsc_defaults_2node(self, mock_set):
+ bootstrap.adjust_priority_in_rsc_defaults(True)
+ mock_set.assert_called_once_with('priority', 1, property_type='rsc_defaults', conditional=True)
+
+ @mock.patch('crmsh.utils.set_property')
+ def test_adjust_priority_in_rsc_defaults(self, mock_set):
+ bootstrap.adjust_priority_in_rsc_defaults(False)
+ mock_set.assert_called_once_with('priority', 0, property_type='rsc_defaults')
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_adjust_priority_fencing_delay_no_fence_agent(self, mock_run):
+ mock_run.return_value = None
+ bootstrap.adjust_priority_fencing_delay(False)
+ mock_run.assert_called_once_with("crm configure show related:stonith")
+
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_adjust_priority_fencing_delay_no_pcmk_delay(self, mock_run, mock_set):
+ mock_run.return_value = "data"
+ bootstrap.adjust_priority_fencing_delay(False)
+ mock_run.assert_called_once_with("crm configure show related:stonith")
+ mock_set.assert_called_once_with("priority-fencing-delay", 0)
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_adjust_properties_no_service(self, mock_is_active):
+ mock_is_active.return_value = False
+ bootstrap.adjust_properties()
+ mock_is_active.assert_called_once_with("pacemaker.service")
+
+ @mock.patch('crmsh.bootstrap.adjust_priority_fencing_delay')
+ @mock.patch('crmsh.bootstrap.adjust_priority_in_rsc_defaults')
+ @mock.patch('crmsh.bootstrap.adjust_stonith_timeout')
+ @mock.patch('crmsh.bootstrap.adjust_pcmk_delay_max')
+ @mock.patch('crmsh.utils.is_2node_cluster_without_qdevice')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_adjust_properties(self, mock_is_active, mock_2node_qdevice, mock_adj_pcmk, mock_adj_stonith, mock_adj_priority, mock_adj_fence):
+ mock_is_active.return_value = True
+ mock_2node_qdevice.return_value = True
+ bootstrap.adjust_properties()
+ mock_is_active.assert_called_once_with("pacemaker.service")
+ mock_adj_pcmk.assert_called_once_with(True)
+ mock_adj_stonith.assert_called_once_with()
+ mock_adj_priority.assert_called_once_with(True)
+ mock_adj_fence.assert_called_once_with(True)
+
+ @mock.patch('crmsh.utils.cluster_copy_file')
+ def test_sync_file_skip_csync2(self, mock_copy):
+ bootstrap._context = mock.Mock(skip_csync2=True, node_list_in_cluster=["node1", "node2"])
+ bootstrap.sync_file("/file1")
+ mock_copy.assert_called_once_with("/file1", nodes=["node1", "node2"], output=False)
+
+ @mock.patch('crmsh.bootstrap.csync2_update')
+ def test_sync_file(self, mock_csync2_update):
+ bootstrap._context = mock.Mock(skip_csync2=False)
+ bootstrap.sync_file("/file1")
+ mock_csync2_update.assert_called_once_with("/file1")
+
+
+class TestValidation(unittest.TestCase):
+ """
+ Unit tests for the bootstrap.Validation class
+ """
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.validate_inst = bootstrap.Validation("10.10.10.1")
+ self.validate_port_inst_in_use = bootstrap.Validation("4567", ["4568"])
+ self.validate_port_inst_out_of_range = bootstrap.Validation("456766")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.utils.IP.is_mcast')
+ def test_is_mcast_addr(self, mock_mcast):
+ mock_mcast.return_value = False
+ with self.assertRaises(ValueError) as err:
+ self.validate_inst._is_mcast_addr()
+ self.assertEqual("10.10.10.1 is not multicast address", str(err.exception))
+ mock_mcast.assert_called_once_with("10.10.10.1")
+
+ def test_is_local_addr(self):
+ with self.assertRaises(ValueError) as err:
+ self.validate_inst._is_local_addr(["20.20.20.1", "20.20.20.2"])
+ self.assertEqual("Address must be a local address (one of ['20.20.20.1', '20.20.20.2'])", str(err.exception))
+
+ def test_is_valid_port_in_use(self):
+ with self.assertRaises(ValueError) as err:
+ self.validate_port_inst_in_use._is_valid_port()
+ self.assertEqual("Port 4567 is already in use by corosync. Leave a gap between multiple rings.", str(err.exception))
+
+ def test_is_valid_port_out_of_range(self):
+ with self.assertRaises(ValueError) as err:
+ self.validate_port_inst_out_of_range._is_valid_port()
+ self.assertEqual("Valid port range should be 1025-65535", str(err.exception))
+
+ @mock.patch('crmsh.bootstrap.Validation._is_mcast_addr')
+ def test_valid_mcast_address(self, mock_mcast):
+ bootstrap.Validation.valid_mcast_address("10.10.10.1")
+ mock_mcast.assert_called_once_with()
+
+ @mock.patch('crmsh.bootstrap.Validation._is_local_addr')
+ def test_valid_ucast_ip(self, mock_local_addr):
+ bootstrap._context = mock.Mock(local_ip_list=["10.10.10.2", "10.10.10.3"])
+ bootstrap.Validation.valid_ucast_ip("10.10.10.1")
+ mock_local_addr.assert_called_once_with(["10.10.10.2", "10.10.10.3"])
+
+ @mock.patch('crmsh.bootstrap.Validation._is_local_addr')
+ def test_valid_mcast_ip(self, mock_local_addr):
+ bootstrap._context = mock.Mock(local_ip_list=["10.10.10.2", "10.10.10.3"],
+ local_network_list=["10.10.10.0"])
+ bootstrap.Validation.valid_mcast_ip("10.10.10.1")
+ mock_local_addr.assert_called_once_with(["10.10.10.2", "10.10.10.3", "10.10.10.0"])
+
+ @mock.patch('crmsh.bootstrap.Validation._is_valid_port')
+ def test_valid_port(self, mock_port):
+ bootstrap.Validation.valid_port("10.10.10.1")
+ mock_port.assert_called_once_with()
+
+ @mock.patch('crmsh.bootstrap.invokerc')
+ @mock.patch('crmsh.utils.IP.is_ipv6')
+ def test_valid_admin_ip_in_use(self, mock_ipv6, mock_invoke):
+ mock_ipv6.return_value = False
+ mock_invoke.return_value = True
+
+ with self.assertRaises(ValueError) as err:
+ self.validate_inst.valid_admin_ip("10.10.10.1")
+ self.assertEqual("Address already in use: 10.10.10.1", str(err.exception))
+
+ mock_ipv6.assert_called_once_with("10.10.10.1")
+ mock_invoke.assert_called_once_with("ping -c 1 10.10.10.1")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_cluster_is_active(self, mock_context, mock_init, mock_active,
+ mock_error):
+ mock_context_inst = mock.Mock()
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = False
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_called_once_with("corosync.service")
+ mock_error.assert_called_once_with("Cluster is not active - can't execute removing action")
+
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_qdevice(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice):
+ mock_context_inst = mock.Mock(qdevice=True, cluster_node=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = [True, True]
+
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_error.assert_not_called()
+ mock_qdevice.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_qdevice_cluster_node(self, mock_context, mock_init, mock_active, mock_error):
+ mock_context_inst = mock.Mock(qdevice=True, cluster_node="node1")
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = True
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_called_once_with("corosync.service")
+ mock_error.assert_called_once_with("Either remove node or qdevice")
+
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_no_cluster_node(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_status, mock_prompt):
+ mock_context_inst = mock.Mock(yes_to_all=False, cluster_node=None, qdevice_rm_flag=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = [True, True]
+ mock_prompt.return_value = None
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_status.assert_called_once_with('Remove This Node from Cluster:\n You will be asked for the IP address or name of an existing node,\n which will be removed from the cluster. This command must be\n executed from a different node in the cluster.\n')
+ mock_prompt.assert_called_once_with("IP address or hostname of cluster node (e.g.: 192.168.1.1)", ".+")
+ mock_error.assert_called_once_with("No existing IP/hostname specified (use -c option)")
+
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_no_confirm(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_hostname, mock_confirm):
+ mock_context_inst = mock.Mock(cluster_node="node1", force=False, qdevice_rm_flag=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = [True, True]
+ mock_hostname.return_value = "node1"
+ mock_confirm.return_value = False
+
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_error.assert_not_called()
+ mock_hostname.assert_called_once_with('node1')
+ mock_confirm.assert_called_once_with('Removing node "node1" from the cluster: Are you sure?')
+
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_self_need_force(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node):
+ mock_context_inst = mock.Mock(cluster_node="node1", force=False, qdevice_rm_flag=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = [True, True]
+ mock_hostname.return_value = "node1"
+ mock_confirm.return_value = True
+ mock_this_node.return_value = "node1"
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_hostname.assert_called_once_with('node1')
+ mock_confirm.assert_called_once_with('Removing node "node1" from the cluster: Are you sure?')
+ mock_this_node.assert_called_once_with()
+ mock_error.assert_called_once_with("Removing self requires --force")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.remove_self')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_self(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node, mock_self, mock_run):
+ mock_context_inst = mock.Mock(cluster_node="node1", force=True, qdevice_rm_flag=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = [True, True]
+ mock_hostname.return_value = "node1"
+ mock_this_node.return_value = "node1"
+
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_hostname.assert_called_once_with('node1')
+ mock_confirm.assert_not_called()
+ mock_this_node.assert_called_once_with()
+ mock_error.assert_not_called()
+ mock_self.assert_called_once_with(True)
+ mock_run.assert_called_once_with('rm -rf /var/lib/crmsh', 'node1')
+
+ @mock.patch('crmsh.xmlutil.listnodes')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove_not_in_cluster(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node, mock_list):
+ mock_context_inst = mock.Mock(cluster_node="node2", force=True, qdevice_rm_flag=None)
+ mock_context.return_value = mock_context_inst
+ mock_active.return_value = [True, True]
+ mock_hostname.return_value = "node2"
+ mock_this_node.return_value = "node1"
+ mock_list.return_value = ["node1", "node3"]
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_hostname.assert_called_once_with('node2')
+ mock_confirm.assert_not_called()
+ mock_this_node.assert_called_once_with()
+ mock_error.assert_called_once_with("Specified node node2 is not configured in cluster! Unable to remove.")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.utils.fetch_cluster_node_list_from_node')
+ @mock.patch('crmsh.bootstrap.remove_node_from_cluster')
+ @mock.patch('crmsh.xmlutil.listnodes')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
+ @mock.patch('crmsh.bootstrap.remove_qdevice')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.init')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_bootstrap_remove(self, mock_context, mock_init, mock_active,
+ mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node,
+ mock_list, mock_remove, mock_fetch, mock_run):
+ mock_context_inst = mock.Mock(cluster_node="node2", qdevice_rm_flag=None, force=True)
+ mock_context.return_value = mock_context_inst
+ mock_active.side_effect = [True, False]
+ mock_hostname.return_value = "node2"
+ mock_this_node.return_value = "node1"
+ mock_list.return_value = ["node1", "node2"]
+ mock_fetch.return_value = ["node1", "node2"]
+
+ bootstrap.bootstrap_remove(mock_context_inst)
+
+ mock_init.assert_called_once_with()
+ mock_active.assert_has_calls([
+ mock.call("corosync.service"),
+ mock.call("csync2.socket")
+ ])
+ mock_qdevice.assert_not_called()
+ mock_hostname.assert_called_once_with('node2')
+ mock_confirm.assert_not_called()
+ mock_error.assert_not_called()
+ mock_remove.assert_called_once_with('node2')
+ mock_run.assert_called_once_with('rm -rf /var/lib/crmsh', 'node2')
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ @mock.patch('crmsh.xmlutil.listnodes')
+ @mock.patch('crmsh.utils.this_node')
+ def test_remove_self_other_nodes(self, mock_this_node, mock_list, mock_run, mock_error):
+ mock_this_node.return_value = 'node1'
+ mock_list.return_value = ["node1", "node2"]
+ mock_run.return_value = (1, '', 'err')
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap._context = mock.Mock(cluster_node="node1", yes_to_all=True)
+ bootstrap.remove_self()
+
+ mock_list.assert_called_once_with(include_remote_nodes=False)
+ mock_run.assert_called_once_with("node2", "crm cluster remove -y -c node1")
+ mock_error.assert_called_once_with("Failed to remove this node from node2")
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_rm_configuration_files(self, mock_run, mock_installed):
+ bootstrap._context = mock.Mock(rm_list=["file1", "file2"])
+ mock_installed.return_value = True
+ bootstrap.rm_configuration_files()
+ mock_run.assert_has_calls([
+ mock.call('rm -f file1 file2', None),
+ mock.call('cp /usr/share/fillup-templates/sysconfig.sbd /etc/sysconfig/sbd', None)
+ ])
+
+ @mock.patch('crmsh.utils.get_iplist_from_name')
+ @mock.patch('crmsh.corosync.get_values')
+ def test_get_cluster_node_ip_host(self, mock_get_values, mock_get_iplist):
+ mock_get_values.return_value = ["node1", "node2"]
+ self.assertIsNone(bootstrap.get_cluster_node_ip('node1'))
+ mock_get_values.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_get_iplist.assert_not_called()
+
+ @mock.patch('crmsh.utils.get_iplist_from_name')
+ @mock.patch('crmsh.corosync.get_values')
+ def test_get_cluster_node_ip(self, mock_get_values, mock_get_iplist):
+ mock_get_values.return_value = ["10.10.10.1", "10.10.10.2"]
+ mock_get_iplist.return_value = ["10.10.10.1"]
+ self.assertEqual("10.10.10.1", bootstrap.get_cluster_node_ip('node1'))
+ mock_get_values.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_get_iplist.assert_called_once_with('node1')
+
+ @mock.patch('crmsh.service_manager.ServiceManager.stop_service')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_stop_services(self, mock_active, mock_status, mock_stop):
+ mock_active.side_effect = [True, True, True, True]
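+ # All four services report active, so each entry of SERVICES_STOP_LIST
+ # should be stopped and disabled in list order (qdevice before corosync).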
+ bootstrap.stop_services(bootstrap.SERVICES_STOP_LIST)
+ mock_active.assert_has_calls([
+ mock.call("corosync-qdevice.service", remote_addr=None),
+ mock.call("corosync.service", remote_addr=None),
+ mock.call("hawk.service", remote_addr=None),
+ mock.call("csync2.socket", remote_addr=None)
+ ])
+ mock_status.assert_has_calls([
+ mock.call('Stopping the %s%s', 'corosync-qdevice.service', ''),
+ mock.call('Stopping the %s%s', 'corosync.service', ''),
+ mock.call('Stopping the %s%s', 'hawk.service', ''),
+ mock.call('Stopping the %s%s', 'csync2.socket', '')
+ ])
+ mock_stop.assert_has_calls([
+ mock.call("corosync-qdevice.service", disable=True, remote_addr=None),
+ mock.call("corosync.service", disable=True, remote_addr=None),
+ mock.call("hawk.service", disable=True, remote_addr=None),
+ mock.call("csync2.socket", disable=True, remote_addr=None)
+ ])
+
+ @mock.patch.object(NodeMgmt, 'call_delnode')
+ @mock.patch('crmsh.bootstrap.rm_configuration_files')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.stop_services')
+ @mock.patch('crmsh.bootstrap.get_cluster_node_ip')
+ def test_remove_node_from_cluster_rm_node_failed(self, mock_get_ip, mock_stop, mock_status, mock_invoke, mock_error, mock_rm_conf_files, mock_call_delnode):
+ mock_get_ip.return_value = '192.0.2.100'
+ mock_call_delnode.return_value = False
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap._context = mock.Mock(rm_list=["file1", "file2"])
+ bootstrap.remove_node_from_cluster('node1')
+
+ mock_get_ip.assert_called_once_with('node1')
+ mock_status.assert_called_once_with("Removing the node node1")
+ mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1")
+ mock_invoke.assert_not_called()
+ mock_call_delnode.assert_called_once_with("node1")
+ mock_error.assert_called_once_with("Failed to remove node1.")
+
+ @mock.patch.object(NodeMgmt, 'call_delnode')
+ @mock.patch('crmsh.bootstrap.rm_configuration_files')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.bootstrap.invokerc')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.stop_services')
+ @mock.patch('crmsh.bootstrap.get_cluster_node_ip')
+ def test_remove_node_from_cluster_rm_csync_failed(self, mock_get_ip, mock_stop, mock_status, mock_invoke, mock_invokerc, mock_error, mock_rm_conf_files, mock_call_delnode):
+ mock_get_ip.return_value = '192.0.2.100'
+ mock_call_delnode.return_value = True
+ mock_invokerc.return_value = False
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ bootstrap._context = mock.Mock(rm_list=["file1", "file2"])
+ bootstrap.remove_node_from_cluster('node1')
+
+ mock_get_ip.assert_called_once_with('node1')
+ mock_status.assert_called_once_with("Removing the node node1")
+ mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1")
+ mock_invoke.assert_not_called()
+ mock_call_delnode.assert_called_once_with("node1")
+ mock_invokerc.assert_has_calls([
+ mock.call("sed -i /node1/d {}".format(bootstrap.CSYNC2_CFG))
+ ])
+ mock_error.assert_called_once_with("Removing the node node1 from {} failed".format(bootstrap.CSYNC2_CFG))
+
+ @mock.patch.object(NodeMgmt, 'call_delnode')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.rm_configuration_files')
+ @mock.patch('crmsh.bootstrap.adjust_priority_fencing_delay')
+ @mock.patch('crmsh.bootstrap.adjust_priority_in_rsc_defaults')
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.bootstrap.decrease_expected_votes')
+ @mock.patch('crmsh.corosync.del_node')
+ @mock.patch('crmsh.corosync.get_values')
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.bootstrap.invokerc')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.bootstrap.stop_services')
+ @mock.patch('crmsh.bootstrap.get_cluster_node_ip')
+ def test_remove_node_from_cluster_hostname(self, mock_get_ip, mock_stop, mock_status,
+ mock_invoke, mock_invokerc, mock_error, mock_get_values, mock_del, mock_decrease, mock_csync2,
+ mock_adjust_priority, mock_adjust_fence_delay, mock_rm_conf_files, mock_is_active, mock_call_delnode):
+ mock_get_ip.return_value = "10.10.10.1"
+ mock_call_delnode.return_value = True
+ mock_invoke.side_effect = [(True, None, None)]
+ mock_invokerc.return_value = True
+ mock_get_values.return_value = ["10.10.10.1"]
+ mock_is_active.return_value = False
+
+ bootstrap._context = mock.Mock(cluster_node="node1", rm_list=["file1", "file2"])
+ bootstrap.remove_node_from_cluster('node1')
+
+ mock_get_ip.assert_called_once_with('node1')
+ mock_status.assert_has_calls([
+ mock.call("Removing the node node1"),
+ mock.call("Propagating configuration changes across the remaining nodes")
+ ])
+ mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1")
+ mock_call_delnode.assert_called_once_with("node1")
+ mock_invoke.assert_has_calls([
+ mock.call("corosync-cfgtool -R")
+ ])
+ mock_invokerc.assert_called_once_with("sed -i /node1/d {}".format(bootstrap.CSYNC2_CFG))
+ mock_error.assert_not_called()
+ mock_get_values.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_del.assert_called_once_with("10.10.10.1")
+ mock_decrease.assert_called_once_with()
+ mock_csync2.assert_has_calls([
+ mock.call(bootstrap.CSYNC2_CFG),
+ mock.call("/etc/corosync/corosync.conf")
+ ])
diff --git a/test/unittests/test_bugs.py b/test/unittests/test_bugs.py
new file mode 100644
index 0000000..725b020
--- /dev/null
+++ b/test/unittests/test_bugs.py
@@ -0,0 +1,893 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import cibconfig
+from lxml import etree
+from crmsh import xmlutil
+
+factory = cibconfig.cib_factory
+
+
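+# Each test runs against a pushed copy of the shared cib_factory state (with a
+# cleared id registry), so objects created in one test do not leak into the
+# next; teardown_function pops the state back.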
+def setup_function():
+ "set up test fixtures"
+ from crmsh import idmgmt
+ idmgmt.clear()
+ factory._push_state()
+
+
+def teardown_function():
+ factory._pop_state()
+
+
+def test_bug41660_1():
+ xml = """<primitive id="bug41660" class="ocf" provider="pacemaker" type="Dummy"> \
+ <meta_attributes id="bug41660-meta"> \
+ <nvpair id="bug41660-meta-target-role" name="target-role" value="Stopped"/> \
+ </meta_attributes> \
+ </primitive>
+"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ print(etree.tostring(obj.node))
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'primitive bug41660 ocf:pacemaker:Dummy meta target-role=Stopped'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+ commit_holder = factory.commit
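+ # Stub out commit so set_deep_meta_attr below only mutates the in-memory
+ # CIB; the real commit is restored in the finally block.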
+ try:
+ factory.commit = lambda *args: True
+ from crmsh.ui_resource import set_deep_meta_attr
+ set_deep_meta_attr("bug41660", "target-role", "Started")
+ assert ['Started'] == obj.node.xpath('.//nvpair[@name="target-role"]/@value')
+ finally:
+ factory.commit = commit_holder
+
+
+def test_bug41660_2():
+ xml = """
+<clone id="libvirtd-clone">
+ <primitive class="lsb" id="libvirtd" type="libvirtd">
+ <operations>
+ <op id="libvirtd-monitor-interval-15" interval="15" name="monitor" start-delay="15" timeout="15"/>
+ <op id="libvirtd-start-interval-0" interval="0" name="start" on-fail="restart" timeout="15"/>
+ <op id="libvirtd-stop-interval-0" interval="0" name="stop" on-fail="ignore" timeout="15"/>
+ </operations>
+ <meta_attributes id="libvirtd-meta_attributes"/>
+ </primitive>
+ <meta_attributes id="libvirtd-clone-meta">
+ <nvpair id="libvirtd-interleave" name="interleave" value="true"/>
+ <nvpair id="libvirtd-ordered" name="ordered" value="true"/>
+ <nvpair id="libvirtd-clone-meta-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+</clone>
+"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ #data = obj.repr_cli(format_mode=-1)
+ #print data
+ #exp = 'clone libvirtd-clone libvirtd meta interleave=true ordered=true target-role=Stopped'
+ #assert data == exp
+ #assert obj.cli_use_validate()
+
+ print(etree.tostring(obj.node))
+
+ commit_holder = factory.commit
+ try:
+ factory.commit = lambda *args: True
+ from crmsh.ui_resource import set_deep_meta_attr
+ print("PRE", etree.tostring(obj.node))
+ set_deep_meta_attr("libvirtd-clone", "target-role", "Started")
+ print("POST", etree.tostring(obj.node))
+ assert ['Started'] == obj.node.xpath('.//nvpair[@name="target-role"]/@value')
+ finally:
+ factory.commit = commit_holder
+
+
+def test_bug41660_3():
+ xml = """
+<clone id="libvirtd-clone">
+ <primitive class="lsb" id="libvirtd" type="libvirtd">
+ <operations>
+ <op id="libvirtd-monitor-interval-15" interval="15" name="monitor" start-delay="15" timeout="15"/>
+ <op id="libvirtd-start-interval-0" interval="0" name="start" on-fail="restart" timeout="15"/>
+ <op id="libvirtd-stop-interval-0" interval="0" name="stop" on-fail="ignore" timeout="15"/>
+ </operations>
+ <meta_attributes id="libvirtd-meta_attributes"/>
+ </primitive>
+ <meta_attributes id="libvirtd-clone-meta_attributes">
+ <nvpair id="libvirtd-clone-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+</clone>
+"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'clone libvirtd-clone libvirtd meta target-role=Stopped'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+ commit_holder = factory.commit
+ try:
+ factory.commit = lambda *args: True
+ from crmsh.ui_resource import set_deep_meta_attr
+ set_deep_meta_attr("libvirtd-clone", "target-role", "Started")
+ assert ['Started'] == obj.node.xpath('.//nvpair[@name="target-role"]/@value')
+ finally:
+ factory.commit = commit_holder
+
+
+def test_comments():
+ xml = """<cib epoch="25" num_updates="1" admin_epoch="0" validate-with="pacemaker-1.2" cib-last-written="Thu Mar 6 15:53:49 2014" update-origin="beta1" update-client="cibadmin" update-user="root" crm_feature_set="3.0.8" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.11-3.3-3ca8c3b"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <!--# COMMENT TEXT 1 -->
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node uname="beta1" id="1">
+ <!--# COMMENT TEXT 2 -->
+ </node>
+ </nodes>
+ <resources/>
+ <constraints/>
+ <rsc_defaults>
+ <meta_attributes id="rsc-options">
+ <nvpair name="resource-stickiness" value="1" id="rsc-options-resource-stickiness"/>
+ <!--# COMMENT TEXT 3 -->
+ </meta_attributes>
+ </rsc_defaults>
+ </configuration>
+ <status>
+ <node_state id="1" uname="beta1" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources/>
+ </lrm>
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-shutdown" name="shutdown" value="0"/>
+ <nvpair id="status-1-probe_complete" name="probe_complete" value="true"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>"""
+ elems = etree.fromstring(xml)
+ xmlutil.sanitize_cib(elems)
+ assert xmlutil.xml_tostring(elems).count("COMMENT TEXT") == 3
+
+
+def test_eq1():
+ xml1 = """<cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"></nvpair>
+ <nvpair id="cib-bootstrap-options-stonith-timeout" name="stonith-timeout" value="180"></nvpair>
+ <nvpair id="cib-bootstrap-options-symmetric-cluster" name="symmetric-cluster" value="false"></nvpair>
+ <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="freeze"></nvpair>
+ <nvpair id="cib-bootstrap-options-batch-limit" name="batch-limit" value="20"></nvpair>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.10-c1a326d"></nvpair>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"></nvpair>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1391433789"></nvpair>
+ <nvpair id="cib-bootstrap-options-is-managed-default" name="is-managed-default" value="true"></nvpair>
+ </cluster_property_set>
+ """
+ xml2 = """<cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"></nvpair>
+ <nvpair id="cib-bootstrap-options-stonith-timeout" name="stonith-timeout" value="180"></nvpair>
+ <nvpair id="cib-bootstrap-options-symmetric-cluster" name="symmetric-cluster" value="false"></nvpair>
+ <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="freeze"></nvpair>
+ <nvpair id="cib-bootstrap-options-batch-limit" name="batch-limit" value="20"></nvpair>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.10-c1a326d"></nvpair>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"></nvpair>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1391433789"></nvpair>
+ <nvpair id="cib-bootstrap-options-is-managed-default" name="is-managed-default" value="true"></nvpair>
+ </cluster_property_set>
+ """
+ e1 = etree.fromstring(xml1)
+ e2 = etree.fromstring(xml2)
+ assert xmlutil.xml_equals(e1, e2, show=True)
+
+
+def test_pcs_interop_1():
+ """
+ pcs<>crmsh interop bug
+ """
+
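+ # pcs may add a second meta_attributes set to the same clone; setting
+ # target-role again must update the existing nvpair, not add a duplicate.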
+ xml = """<clone id="dummies">
+ <meta_attributes id="dummies-meta">
+ <nvpair name="globally-unique" value="false" id="dummies-meta-globally-unique"/>
+ </meta_attributes>
+ <meta_attributes id="dummies-meta_attributes">
+ <nvpair id="dummies-meta_attributes-target-role" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <primitive id="dummy-1" class="ocf" provider="heartbeat" type="Dummy"/>
+ </clone>"""
+ elem = etree.fromstring(xml)
+ from crmsh.ui_resource import set_deep_meta_attr_node
+
+ assert len(elem.xpath(".//meta_attributes/nvpair[@name='target-role']")) == 1
+
+ print("BEFORE:", etree.tostring(elem))
+
+ set_deep_meta_attr_node(elem, 'target-role', 'Stopped')
+
+ print("AFTER:", etree.tostring(elem))
+
+ assert len(elem.xpath(".//meta_attributes/nvpair[@name='target-role']")) == 1
+
+
+def test_bnc878128():
+ """
+ L3: "crm configure show" displays XML information instead of typical crm output.
+ """
+ xml = """<rsc_location id="cli-prefer-dummy-resource" rsc="dummy-resource"
+role="Started">
+ <rule id="cli-prefer-rule-dummy-resource" score="INFINITY">
+ <expression id="cli-prefer-expr-dummy-resource" attribute="#uname"
+operation="eq" value="x64-4"/>
+ <date_expression id="cli-prefer-lifetime-end-dummy-resource" operation="lt"
+end="2014-05-17 17:56:11Z"/>
+ </rule>
+</rsc_location>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'location cli-prefer-dummy-resource dummy-resource role=Started rule #uname eq x64-4 and date lt "2014-05-17 17:56:11Z"'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_order_without_score_kind():
+ """
+ Spec says order doesn't require score or kind to be set
+ """
+ xml = '<rsc_order first="a" first-action="promote" id="order-a-b" then="b" then-action="start"/>'
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'order order-a-b a:promote b:start'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_bnc878112():
+ """
+ crm configure group can hijack a cloned primitive (and then crash)
+ """
+ obj1 = factory.create_object('primitive', 'p1', 'Dummy')
+ assert obj1 is True
+ obj2 = factory.create_object('group', 'g1', 'p1')
+ assert obj2 is True
+ obj3 = factory.create_object('group', 'g2', 'p1')
+ print(obj3)
+ assert obj3 is False
+
+
+def test_copy_nvpairs():
+ from crmsh.cibconfig import copy_nvpairs
+
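+ # copy_nvpairs should overwrite the value of an already-present name in
+ # place rather than appending a second nvpair with the same name.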
+ to = etree.fromstring('''
+ <node>
+ <nvpair name="stonith-enabled" value="true"/>
+ </node>
+ ''')
+ copy_nvpairs(to, etree.fromstring('''
+ <node>
+ <nvpair name="stonith-enabled" value="false"/>
+ </node>
+ '''))
+
+ assert ['stonith-enabled'] == to.xpath('./nvpair/@name')
+ assert ['false'] == to.xpath('./nvpair/@value')
+
+ copy_nvpairs(to, etree.fromstring('''
+ <node>
+ <nvpair name="stonith-enabled" value="true"/>
+ </node>
+ '''))
+
+ assert ['stonith-enabled'] == to.xpath('./nvpair/@name')
+ assert ['true'] == to.xpath('./nvpair/@value')
+
+
+def test_pengine_test():
+ xml = '''<primitive class="ocf" id="rsc1" provider="pacemaker" type="Dummy">
+ <instance_attributes id="rsc1-instance_attributes-1">
+ <nvpair id="rsc1-instance_attributes-1-state" name="state" value="/var/run/Dummy-rsc1-clusterA"/>
+ <rule id="rsc1-instance_attributes-1-rule-1" score="0">
+ <expression id="rsc1-instance_attributes-1-rule-1-expr-1" attribute="#cluster-name" operation="eq" value="clusterA"/>
+ </rule>
+ </instance_attributes>
+ <instance_attributes id="rsc1-instance_attributes-2">
+ <nvpair id="rsc1-instance_attributes-2-state" name="state" value="/var/run/Dummy-rsc1-clusterB"/>
+ <rule id="rsc1-instance_attributes-2-rule-1" score="0">
+ <expression id="rsc1-instance_attributes-2-rule-1-expr-1" attribute="#cluster-name" operation="eq" value="clusterB"/>
+ </rule>
+ </instance_attributes>
+ <operations>
+ <op id="rsc1-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>'''
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'primitive rsc1 ocf:pacemaker:Dummy params rule 0: #cluster-name eq clusterA state="/var/run/Dummy-rsc1-clusterA" params rule 0: #cluster-name eq clusterB state="/var/run/Dummy-rsc1-clusterB" op monitor interval=10'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_tagset():
+ xml = '''<primitive class="ocf" id="%s" provider="pacemaker" type="Dummy"/>'''
+ tag = '''<tag id="t0"><obj_ref id="r1"/><obj_ref id="r2"/></tag>'''
+ factory.create_from_node(etree.fromstring(xml % ('r1')))
+ factory.create_from_node(etree.fromstring(xml % ('r2')))
+ factory.create_from_node(etree.fromstring(xml % ('r3')))
+ factory.create_from_node(etree.fromstring(tag))
+ elems = factory.get_elems_on_tag("tag:t0")
+ assert set(x.obj_id for x in elems) == set(['r1', 'r2'])
+
+
+def test_op_role():
+ xml = '''<primitive class="ocf" id="rsc2" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="rsc2-monitor-10" interval="10" name="monitor" role="Stopped"/>
+ </operations>
+ </primitive>'''
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'primitive rsc2 ocf:pacemaker:Dummy op monitor interval=10 role=Stopped'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_nvpair_no_value():
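+ # Covers three cases: no value attribute at all (rendered as a bare name),
+ # an explicitly empty value (""), and a whitespace-only value (" ").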
+ xml = '''<primitive class="ocf" id="rsc3" provider="heartbeat" type="Dummy">
+ <instance_attributes id="rsc3-instance_attributes-1">
+ <nvpair id="rsc3-instance_attributes-1-verbose" name="verbose"/>
+ <nvpair id="rsc3-instance_attributes-1-verbase" name="verbase" value=""/>
+ <nvpair id="rsc3-instance_attributes-1-verbese" name="verbese" value=" "/>
+ </instance_attributes>
+ </primitive>'''
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'primitive rsc3 Dummy params verbose verbase="" verbese=" "'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_delete_ticket():
+ xml0 = '<primitive id="daa0" class="ocf" provider="heartbeat" type="Dummy"/>'
+ xml1 = '<primitive id="daa1" class="ocf" provider="heartbeat" type="Dummy"/>'
+ xml2 = '''<rsc_ticket id="taa0" ticket="taaA">
+ <resource_set id="taa0-0">
+ <resource_ref id="daa0"/>
+ <resource_ref id="daa1"/>
+ </resource_set>
+ </rsc_ticket>'''
+ for x in (xml0, xml1, xml2):
+ data = etree.fromstring(x)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+
+ factory.delete('daa0')
+ assert factory.find_object('daa0') is None
+ assert factory.find_object('taa0') is not None
+
+
+def test_quotes():
+ """
+ Parsing escaped quotes
+ """
+ xml = '''<primitive class="ocf" id="q1" provider="pacemaker" type="Dummy">
+ <instance_attributes id="q1-instance_attributes-1">
+ <nvpair id="q1-instance_attributes-1-state" name="state" value="foo&quot;foo&quot;"/>
+ </instance_attributes>
+ </primitive>
+ '''
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ print("OUTPUT:", data)
+ exp = 'primitive q1 ocf:pacemaker:Dummy params state="foo\\"foo\\""'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_nodeattrs():
+ """
+ bug with parsing node attrs
+ """
+ xml = '''<node id="1" uname="dell71"> \
+ <instance_attributes id="dell71-instance_attributes"> \
+ <nvpair name="staging-0-0-placement" value="true" id="dell71-instance_attributes-staging-0-0-placement"/> \
+ <nvpair name="meta-0-0-placement" value="true" id="dell71-instance_attributes-meta-0-0-placement"/> \
+ </instance_attributes> \
+ <instance_attributes id="nodes-1"> \
+ <nvpair id="nodes-1-standby" name="standby" value="off"/> \
+ </instance_attributes> \
+</node>'''
+
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ exp = 'node 1: dell71 attributes staging-0-0-placement=true meta-0-0-placement=true attributes standby=off'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_nodeattrs2():
+ xml = """<node id="h04" uname="h04"> \
+ <utilization id="h04-utilization"> \
+ <nvpair id="h04-utilization-utl_ram" name="utl_ram" value="1200"/> \
+ <nvpair id="h04-utilization-utl_cpu" name="utl_cpu" value="200"/> \
+ </utilization> \
+ <instance_attributes id="nodes-h04"> \
+ <nvpair id="nodes-h04-standby" name="standby" value="off"/> \
+ </instance_attributes> \
+</node>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert obj is not None
+ data = obj.repr_cli(format_mode=-1)
+ exp = 'node h04 utilization utl_ram=1200 utl_cpu=200 attributes standby=off'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_group_constraint_location():
+ """
+ configuring a location constraint on a grouped resource is OK
+ """
+ factory.create_object('node', 'node1')
+ factory.create_object('primitive', 'p1', 'Dummy')
+ factory.create_object('primitive', 'p2', 'Dummy')
+ factory.create_object('group', 'g1', 'p1', 'p2')
+ factory.create_object('location', 'loc-p1', 'p1', 'inf:', 'node1')
+ c = factory.find_object('loc-p1')
+ assert c and c.check_sanity() == 0
+
+
+def test_group_constraint_colocation():
+ """
+ configuring a colocation constraint on a grouped resource is bad
+ """
+ factory.create_object('primitive', 'p1', 'Dummy')
+ factory.create_object('primitive', 'p2', 'Dummy')
+ factory.create_object('group', 'g1', 'p1', 'p2')
+ factory.create_object('colocation', 'coloc-p1-p2', 'inf:', 'p1', 'p2')
+ c = factory.find_object('coloc-p1-p2')
+ assert c and c.check_sanity() > 0
+
+
+def test_group_constraint_colocation_rscset():
+ """
+ configuring a constraint on a grouped resource is bad
+ """
+ factory.create_object('primitive', 'p1', 'Dummy')
+ factory.create_object('primitive', 'p2', 'Dummy')
+ factory.create_object('primitive', 'p3', 'Dummy')
+ factory.create_object('group', 'g1', 'p1', 'p2')
+ factory.create_object('colocation', 'coloc-p1-p2-p3', 'inf:', 'p1', 'p2', 'p3')
+ c = factory.find_object('coloc-p1-p2-p3')
+ assert c and c.check_sanity() > 0
+
+
+def test_clone_constraint_colocation_rscset():
+ """
+ configuring a constraint on a cloned resource is bad
+ """
+ factory.create_object('primitive', 'p1', 'Dummy')
+ factory.create_object('primitive', 'p2', 'Dummy')
+ factory.create_object('primitive', 'p3', 'Dummy')
+ factory.create_object('clone', 'c1', 'p1')
+ factory.create_object('colocation', 'coloc-p1-p2-p3', 'inf:', 'p1', 'p2', 'p3')
+ c = factory.find_object('coloc-p1-p2-p3')
+ assert c and c.check_sanity() > 0
+
+
+def test_existing_node_resource():
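+ # The fixture CIB apparently already contains a node named ha-one, so the
+ # primitive below shares its name; node and resource must stay distinct.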
+ factory.create_object('primitive', 'ha-one', 'Dummy')
+
+ n = factory.find_node('ha-one')
+ assert factory.test_element(n)
+
+ r = factory.find_resource('ha-one')
+ assert factory.test_element(r)
+
+ assert n != r
+
+ assert factory.check_structure()
+ factory.cli_use_validate_all()
+
+ ok, s = factory.mkobj_set('ha-one')
+ assert ok
+
+
+@mock.patch("crmsh.log.LoggerUtils.line_number")
+@mock.patch("crmsh.log.LoggerUtils.incr_lineno")
+def test_existing_node_resource_2(mock_incr, mock_line_num):
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+
+ from crmsh import clidisplay
+ with clidisplay.nopretty():
+ text = obj.repr()
+ text += "\nprimitive ha-one Dummy"
+ ok = obj.save(text)
+ assert ok
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ with clidisplay.nopretty():
+ text2 = obj.repr()
+
+ assert sorted(text.split('\n')) == sorted(text2.split('\n'))
+
+
+@mock.patch("crmsh.log.LoggerUtils.line_number")
+@mock.patch("crmsh.log.LoggerUtils.incr_lineno")
+def test_id_collision_breakage_1(mock_incr, mock_line_num):
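+ # Repeatedly replace and extend the configuration, then restore the
+ # original CIB, checking that regenerated ids do not collide and that the
+ # final round-trip matches the starting state exactly.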
+ from crmsh import clidisplay
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ with clidisplay.nopretty():
+ original_cib = obj.repr()
+ print(original_cib)
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+
+ ok = obj.save("""node node1
+primitive p0 ocf:pacemaker:Dummy
+primitive p1 ocf:pacemaker:Dummy
+primitive p2 ocf:heartbeat:Delay \
+ params startdelay=2 mondelay=2 stopdelay=2
+primitive p3 ocf:pacemaker:Dummy
+primitive st stonith:null params hostlist=node1
+clone c1 p1
+ms m1 p2
+op_defaults timeout=60s
+""")
+ assert ok
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save("""op_defaults timeout=2m
+node node1 \
+ attributes mem=16G
+primitive st stonith:null \
+ params hostlist='node1' \
+ meta description="some description here" requires=nothing \
+ op monitor interval=60m
+primitive p1 ocf:heartbeat:Dummy \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10
+""")
+ assert ok
+
+ obj = cibconfig.mkset_obj()
+ with clidisplay.nopretty():
+ text = obj.repr()
+ text = text + "\nprimitive p2 ocf:heartbeat:Dummy"
+ ok = obj.save(text)
+ assert ok
+
+ obj = cibconfig.mkset_obj()
+ with clidisplay.nopretty():
+ text = obj.repr()
+ text = text + "\ngroup g1 p1 p2"
+ ok = obj.save(text)
+ assert ok
+
+ obj = cibconfig.mkset_obj("g1")
+ with clidisplay.nopretty():
+ text = obj.repr()
+ text = text.replace("group g1 p1 p2", "group g1 p1 p3")
+ text = text + "\nprimitive p3 ocf:heartbeat:Dummy"
+ ok = obj.save(text)
+ assert ok
+
+ obj = cibconfig.mkset_obj("g1")
+ with clidisplay.nopretty():
+ print(obj.repr().strip())
+ assert obj.repr().strip() == "group g1 p1 p3"
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save(original_cib)
+ assert ok
+ obj = cibconfig.mkset_obj()
+ with clidisplay.nopretty():
+ print("*** ORIGINAL")
+ print(original_cib)
+ print("*** NOW")
+ print(obj.repr())
+ assert original_cib == obj.repr()
+
+
+@mock.patch("crmsh.log.LoggerUtils.line_number")
+@mock.patch("crmsh.log.LoggerUtils.incr_lineno")
+def test_id_collision_breakage_3(mock_incr, mock_line_num):
+ from crmsh import clidisplay
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ with clidisplay.nopretty():
+ original_cib = obj.repr()
+ print(original_cib)
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save("""node node1
+primitive node1 Dummy params fake=something
+ """)
+ assert ok
+
+ print("** baseline")
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ with clidisplay.nopretty():
+ print(obj.repr())
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save("""primitive node1 Dummy params fake=something-else
+ """, remove=False, method='update')
+ assert ok
+
+ print("** end")
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save(original_cib, remove=True, method='replace')
+ assert ok
+ obj = cibconfig.mkset_obj()
+ with clidisplay.nopretty():
+ print("*** ORIGINAL")
+ print(original_cib)
+ print("*** NOW")
+ print(obj.repr())
+ assert original_cib == obj.repr()
+
+
+@mock.patch("crmsh.log.LoggerUtils.line_number")
+@mock.patch("crmsh.log.LoggerUtils.incr_lineno")
+def test_id_collision_breakage_2(mock_incr, mock_line_num):
+ from crmsh import clidisplay
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ with clidisplay.nopretty():
+ original_cib = obj.repr()
+ print(original_cib)
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+
+ ok = obj.save("""node 168633610: webui
+node 168633611: node1
+rsc_template web-server apache \
+ params port=8000 \
+ op monitor interval=10s
+primitive d0 Dummy \
+ meta target-role=Started
+primitive d1 Dummy
+primitive d2 Dummy
+# Never use this STONITH agent in production!
+primitive development-stonith stonith:null \
+ params hostlist="webui node1 node2 node3"
+primitive proxy systemd:haproxy \
+ op monitor interval=10s
+primitive proxy-vip IPaddr2 \
+ params ip=10.13.37.20
+primitive srv1 @web-server
+primitive srv2 @web-server
+primitive vip1 IPaddr2 \
+ params ip=10.13.37.21 \
+ op monitor interval=20s
+primitive vip2 IPaddr2 \
+ params ip=10.13.37.22 \
+ op monitor interval=20s
+primitive virtual-ip IPaddr2 \
+ params ip=10.13.37.77 lvs_support=false \
+ op start timeout=20 interval=0 \
+ op stop timeout=20 interval=0 \
+ op monitor interval=10 timeout=20
+primitive yet-another-virtual-ip IPaddr2 \
+ params ip=10.13.37.72 cidr_netmask=24 \
+ op start interval=0 timeout=20 \
+ op stop interval=0 timeout=20 \
+ op monitor interval=10 timeout=20 \
+ meta target-role=Started
+group dovip d0 virtual-ip \
+ meta target-role=Stopped
+group g-proxy proxy-vip proxy
+group g-serv1 vip1 srv1
+group g-serv2 vip2 srv2
+clone d2-clone d2 \
+ meta target-role=Started
+tag dummytag d0 d1 d1-on-node1 d2 d2-clone
+# Never put the two web servers on the same node
+colocation co-serv -inf: g-serv1 g-serv2
+location d1-on-node1 d1 inf: node1
+# Never put any web server or haproxy on webui
+location l-avoid-webui { g-proxy g-serv1 g-serv2 } -inf: webui
+# Prefer to spread groups across nodes
+location l-proxy g-proxy 200: node1
+location l-serv1 g-serv1 200: node2
+location l-serv2 g-serv2 200: node3
+property cib-bootstrap-options: \
+ have-watchdog=false \
+ dc-version="1.1.13+git20150917.20c2178-224.2-1.1.13+git20150917.20c2178" \
+ cluster-infrastructure=corosync \
+ cluster-name=hacluster \
+ stonith-enabled=true \
+ no-quorum-policy=ignore
+rsc_defaults rsc-options: \
+ resource-stickiness=1 \
+ migration-threshold=3
+op_defaults op-options: \
+ timeout=600 \
+ record-pending=true
+""")
+ assert ok
+
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+ ok = obj.save(original_cib)
+ assert ok
+ obj = cibconfig.mkset_obj()
+ with clidisplay.nopretty():
+ print("*** ORIGINAL")
+ print(original_cib)
+ print("*** NOW")
+ print(obj.repr())
+ assert original_cib == obj.repr()
+
+
+def test_bug_110():
+ """
+ configuring attribute-based fencing-topology
+ """
+ factory.create_object(*"primitive stonith-libvirt stonith:null".split())
+ factory.create_object(*"primitive fence-nova stonith:null".split())
+ cmd = "fencing_topology attr:OpenStack-role=compute stonith-libvirt,fence-nova".split()
+ ok = factory.create_object(*cmd)
+ assert ok
+ obj = cibconfig.mkset_obj()
+ assert obj is not None
+
+ for o in obj.obj_set:
+ if o.node.tag == 'fencing-topology':
+ assert o.check_sanity() == 0
+
+
+@mock.patch("crmsh.log.LoggerUtils.line_number")
+@mock.patch("crmsh.log.LoggerUtils.incr_lineno")
+def test_reordering_resource_sets(mock_incr, mock_line_num):
+ """
+ Can we reorder resource sets?
+ """
+ from crmsh import clidisplay
+ obj1 = factory.create_object('primitive', 'p1', 'Dummy')
+ assert obj1 is True
+ obj2 = factory.create_object('primitive', 'p2', 'Dummy')
+ assert obj2 is True
+ obj3 = factory.create_object('primitive', 'p3', 'Dummy')
+ assert obj3 is True
+ obj4 = factory.create_object('primitive', 'p4', 'Dummy')
+ assert obj4 is True
+ o1 = factory.create_object('order', 'o1', 'p1', 'p2', 'p3', 'p4')
+ assert o1 is True
+
+ obj = cibconfig.mkset_obj('o1')
+ assert obj is not None
+ rc = obj.save('order o1 p4 p3 p2 p1')
+ assert rc == True
+
+ obj2 = cibconfig.mkset_obj('o1')
+ with clidisplay.nopretty():
+ assert "order o1 p4 p3 p2 p1" == obj2.repr().strip()
+
+
+def test_bug959895():
+ """
+ Allow importing XML with cloned groups
+ """
+ xml = """<clone id="c-bug959895">
+ <group id="g-bug959895">
+ <primitive id="p-bug959895-a" class="ocf" provider="pacemaker" type="Dummy" />
+ <primitive id="p-bug959895-b" class="ocf" provider="pacemaker" type="Dummy" />
+ </group>
+</clone>
+"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ print(etree.tostring(obj.node))
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'clone c-bug959895 g-bug959895'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+ commit_holder = factory.commit
+ try:
+ factory.commit = lambda *args: True
+ from crmsh.ui_resource import set_deep_meta_attr
+ set_deep_meta_attr("c-bug959895", "target-role", "Started")
+ assert ['Started'] == obj.node.xpath('.//nvpair[@name="target-role"]/@value')
+ finally:
+ factory.commit = commit_holder
+
+
+def test_node_util_attr():
+ """
+ Handle a node with utilization before attributes correctly
+ """
+ xml = """<node id="aberfeldy" uname="aberfeldy">
+ <utilization id="nodes-aberfeldy-utilization">
+ <nvpair id="nodes-aberfeldy-utilization-cpu" name="cpu" value="2"/>
+ <nvpair id="nodes-aberfeldy-utilization-memory" name="memory" value="500"/>
+ </utilization>
+ <instance_attributes id="nodes-aberfeldy">
+ <nvpair id="nodes-aberfeldy-standby" name="standby" value="on"/>
+ </instance_attributes>
+</node>"""
+
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ print(etree.tostring(obj.node))
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'node aberfeldy utilization cpu=2 memory=500 attributes standby=on'
+ assert data == exp
+ assert obj.cli_use_validate()
+
+
+def test_dup_create_same_name():
+ """
+ Creating two objects with the same name
+ """
+ ok = factory.create_object(*"primitive dup1 Dummy".split())
+ assert ok
+ ok = factory.create_object(*"primitive dup1 Dummy".split())
+ assert not ok
+
+
+def test_dup_create():
+ """
+ Creating property sets with unknown properties
+ """
+ ok = factory.create_object(*"property hana_test1: hana_attribute_1=5 hana_attribute_2=mohican".split())
+ assert ok
+ ok = factory.create_object(*"property hana_test2: hana_attribute_1=5s a-b-c-d=e-f-g".split())
+ assert ok
diff --git a/test/unittests/test_cib.py b/test/unittests/test_cib.py
new file mode 100644
index 0000000..def915f
--- /dev/null
+++ b/test/unittests/test_cib.py
@@ -0,0 +1,32 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2015 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+from crmsh import cibconfig
+from lxml import etree
+import copy
+
+factory = cibconfig.cib_factory
+
+
+def setup_function():
+ "set up test fixtures"
+ from crmsh import idmgmt
+ idmgmt.clear()
+
+
+def teardown_function():
+ pass
+
+
+def test_cib_schema_change():
+ "Changing the validate-with CIB attribute"
+ copy_of_cib = copy.copy(factory.cib_orig)
+ print(etree.tostring(copy_of_cib, pretty_print=True))
+ tmp_cib_objects = factory.cib_objects
+ factory.cib_objects = []
+ factory.change_schema("pacemaker-1.1")
+ factory.cib_objects = tmp_cib_objects
+ factory._copy_cib_attributes(copy_of_cib, factory.cib_orig)
+ assert factory.cib_attrs["validate-with"] == "pacemaker-1.1"
+ assert factory.cib_elem.get("validate-with") == "pacemaker-1.1"
diff --git a/test/unittests/test_cliformat.py b/test/unittests/test_cliformat.py
new file mode 100644
index 0000000..2eb25b5
--- /dev/null
+++ b/test/unittests/test_cliformat.py
@@ -0,0 +1,324 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+#
+# unit tests for cliformat.py
+
+from crmsh import cibconfig
+from crmsh import parse
+from lxml import etree
+from .test_parse import MockValidation
+
+factory = cibconfig.cib_factory
+
+
+def assert_is_not_none(thing):
+ assert thing is not None, "Expected non-None value"
+
+
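+# Round-trip helper: parse a CLI command into XML, build a CIB object from the
+# node (replacing any existing object with the same id), render it back to CLI
+# in both nocli and normal modes, and assert the result matches `cli` (or
+# `expected`, when normalization is anticipated).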
+def roundtrip(cli, debug=False, expected=None, format_mode=-1, strip_color=False):
+ parse.validator = MockValidation()
+ node, _, _ = cibconfig.parse_cli_to_xml(cli)
+ assert_is_not_none(node)
+ obj = factory.find_object(node.get("id"))
+ if obj:
+ factory.delete(node.get("id"))
+ obj = factory.create_from_node(node)
+ assert_is_not_none(obj)
+ obj.nocli = True
+ xml = obj.repr_cli(format_mode=format_mode)
+ print(xml)
+ obj.nocli = False
+ s = obj.repr_cli(format_mode=format_mode)
+ if strip_color:
+ import re
+ s = re.sub(r"\$\{[^}]+\}", "", s)
+ if (s != cli) or debug:
+ print("GOT:", s)
+ print("EXP:", cli)
+ assert obj.cli_use_validate()
+ if expected is not None:
+ assert expected == s
+ else:
+ assert cli == s
+ assert not debug
+
+
+def setup_function():
+ "set up test fixtures"
+ from crmsh import idmgmt
+ idmgmt.clear()
+
+
+def teardown_function():
+ "tear down test fixtures"
+
+
+def test_rscset():
+ roundtrip('colocation foo inf: a b')
+ roundtrip('order order_2 Mandatory: [ A B ] C')
+ roundtrip('rsc_template public_vm Xen')
+
+
+''' Seems to rely on the cluster environment; should be a functional test
+def test_normalize():
+ """
+ Test automatic normalization of parameter names:
+ "shutdown_timeout" is a parameter name, but
+ "shutdown-timeout" is not.
+ """
+ roundtrip('primitive vm1 Xen params shutdown-timeout=0',
+ expected='primitive vm1 Xen params shutdown_timeout=0')
+'''
+
+
+def test_group():
+ factory.create_from_cli('primitive p1 Dummy')
+ roundtrip('group g1 p1 params target-role=Stopped')
+
+
+def test_bnc863736():
+ roundtrip('order order_3 Mandatory: [ A B ] C symmetrical=true')
+
+
+def test_sequential():
+ roundtrip('colocation rsc_colocation-master inf: [ vip-master vip-rep sequential=true ] [ msPostgresql:Master sequential=true ]')
+
+
+def test_broken_colo():
+ xml = """<rsc_colocation id="colo-2" score="INFINITY">
+ <resource_set id="colo-2-0" require-all="false">
+ <resource_ref id="vip1"/>
+ <resource_ref id="vip2"/>
+ </resource_set>
+ <resource_set id="colo-2-1" require-all="false" role="Master">
+ <resource_ref id="apache"/>
+ </resource_set>
+</rsc_colocation>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ data = obj.repr_cli(format_mode=-1)
+ assert 'colocation colo-2 inf: [ vip1 vip2 sequential=true ] [ apache:Master sequential=true ]' == data
+ assert obj.cli_use_validate()
+
+
+def test_comment():
+ roundtrip("# comment 1\nprimitive d0 ocf:pacemaker:Dummy", format_mode=0, strip_color=True)
+
+
+def test_comment2():
+ roundtrip("# comment 1\n# comment 2\n# comment 3\nprimitive d0 ocf:pacemaker:Dummy", format_mode=0, strip_color=True)
+
+
+def test_nvpair_ref1():
+ factory.create_from_cli("primitive dummy-0 Dummy params $fiz:buz=bin")
+ roundtrip('primitive dummy-1 Dummy params @fiz:boz')
+
+
+def test_idresolve():
+ factory.create_from_cli("primitive dummy-5 Dummy params buz=bin")
+ roundtrip('primitive dummy-1 Dummy params @dummy-5-instance_attributes-buz')
+
+
+def test_ordering():
+ xml = """<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"> \
+ <operations> \
+ <op name="start" timeout="60" interval="0" id="dummy-start-0"/> \
+ <op name="stop" timeout="60" interval="0" id="dummy-stop-0"/> \
+ <op name="monitor" interval="60" timeout="30" id="dummy-monitor-60"/> \
+ </operations> \
+ <meta_attributes id="dummy-meta_attributes"> \
+ <nvpair id="dummy-meta_attributes-target-role" name="target-role"
+value="Stopped"/> \
+ </meta_attributes> \
+</primitive>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'primitive dummy ocf:pacemaker:Dummy op start timeout=60 interval=0 op stop timeout=60 interval=0 op monitor interval=60 timeout=30 meta target-role=Stopped'
+ assert exp == data
+ assert obj.cli_use_validate()
+
+
+def test_ordering2():
+ xml = """<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"> \
+ <meta_attributes id="dummy2-meta_attributes"> \
+ <nvpair id="dummy2-meta_attributes-target-role" name="target-role"
+value="Stopped"/> \
+ </meta_attributes> \
+ <operations> \
+ <op name="start" timeout="60" interval="0" id="dummy2-start-0"/> \
+ <op name="stop" timeout="60" interval="0" id="dummy2-stop-0"/> \
+ <op name="monitor" interval="60" timeout="30" id="dummy2-monitor-60"/> \
+ </operations> \
+</primitive>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'primitive dummy2 ocf:pacemaker:Dummy meta target-role=Stopped ' \
+ 'op start timeout=60 interval=0 op stop timeout=60 interval=0 ' \
+ 'op monitor interval=60 timeout=30'
+ assert exp == data
+ assert obj.cli_use_validate()
+
+
+def test_fencing():
+ xml = """<fencing-topology>
+ <fencing-level devices="st1" id="fencing" index="1"
+target="ha-three"></fencing-level>
+ <fencing-level devices="st1" id="fencing-0" index="1"
+target="ha-two"></fencing-level>
+ <fencing-level devices="st1" id="fencing-1" index="1"
+target="ha-one"></fencing-level>
+ </fencing-topology>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'fencing_topology st1'
+ assert exp == data
+ assert obj.cli_use_validate()
+
+
+def test_fencing2():
+ xml = """<fencing-topology>
+ <fencing-level devices="apple" id="fencing" index="1"
+target-pattern="green.*"></fencing-level>
+ <fencing-level devices="pear" id="fencing" index="2"
+target-pattern="green.*"></fencing-level>
+ <fencing-level devices="pear" id="fencing" index="1"
+target-pattern="red.*"></fencing-level>
+ <fencing-level devices="apple" id="fencing" index="2"
+target-pattern="red.*"></fencing-level>
+ </fencing-topology>"""
+ data = etree.fromstring(xml)
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ data = obj.repr_cli(format_mode=-1)
+ print(data)
+ exp = 'fencing_topology pattern:green.* apple pear pattern:red.* pear apple'
+ assert exp == data
+ assert obj.cli_use_validate()
+
+
+def test_master():
+ xml = """<master id="ms-1">
+ <crmsh-ref id="dummy3" />
+ </master>
+ """
+ data = etree.fromstring(xml)
+ factory.create_from_cli("primitive dummy3 ocf:pacemaker:Dummy")
+ data, _, _ = cibconfig.postprocess_cli(data)
+ print("after postprocess:", etree.tostring(data))
+ obj = factory.create_from_node(data)
+ assert_is_not_none(obj)
+ assert obj.cli_use_validate()
+
+
+def test_param_rules():
+ roundtrip('primitive foo Dummy ' +
+ 'params rule #uname eq wizbang laser=yes ' +
+ 'params rule #uname eq gandalf staff=yes')
+
+ roundtrip('primitive mySpecialRsc me:Special ' +
+ 'params 3: rule #uname eq node1 interface=eth1 ' +
+ 'params 2: rule #uname eq node2 interface=eth2 port=8888 ' +
+ 'params 1: interface=eth0 port=9999')
+
+
+def test_operation_rules():
+ roundtrip('primitive test Dummy ' +
+ 'op start interval=0 '
+ 'op_params 2: rule #uname eq node1 fake=fake ' +
+ 'op_params 1: fake=real ' +
+ 'op_meta 2: rule #ra-version version:gt 1.0 timeout=120s ' +
+ 'op_meta 1: timeout=60s')
+
+
+def test_multiple_attrsets():
+ roundtrip('primitive mySpecialRsc me:Special ' +
+ 'params 3: interface=eth1 ' +
+ 'params 2: port=8888')
+ roundtrip('primitive mySpecialRsc me:Special ' +
+ 'meta 3: interface=eth1 ' +
+ 'meta 2: port=8888')
+
+
+def test_new_acls():
+ roundtrip('role fum description=test read description=test2 xpath:"*[@name=karl]"')
+
+
+def test_acls_reftype():
+ roundtrip('role boo deny ref:d0 type:nvpair',
+ expected='role boo deny ref:d0 deny type:nvpair')
+
+
+def test_acls_oldsyntax():
+ roundtrip('role boo deny ref:d0 tag:nvpair',
+ expected='role boo deny ref:d0 deny type:nvpair')
+
+
+def test_rules():
+ roundtrip('primitive p1 Dummy params ' +
+ 'rule $role=Started date in start=2009-05-26 end=2010-05-26 ' +
+ 'or date gt 2014-01-01 state=2')
+
+
+def test_new_role():
+ roundtrip('role silly-role-2 read xpath:"//nodes//attributes" ' +
+ 'deny type:nvpair deny ref:d0 deny type:nvpair')
+
+
+def test_topology_1114():
+ roundtrip('fencing_topology attr:rack=1 node1,node2')
+
+
+def test_topology_1114_pattern():
+ roundtrip('fencing_topology pattern:.* network disk')
+
+
+def test_locrule():
+ roundtrip('location loc-testfs-with-eth1 testfs rule ethmonitor-eth1 eq 1')
+
+
+def test_is_value_sane():
+ roundtrip('''primitive p1 Dummy params state="bo'o"''')
+
+
+def test_is_value_sane_2():
+ roundtrip('primitive p1 Dummy params state="bo\\"o"')
+
+
+def test_alerts_1():
+ roundtrip('alert alert1 "/tmp/foo.sh" to "/tmp/bar.log"')
+
+
+def test_alerts_2():
+ roundtrip('alert alert2 "/tmp/foo.sh" attributes foo=bar to "/tmp/bar.log"')
+
+
+def test_alerts_3():
+ roundtrip('alert alert3 "a path here" meta baby to "/tmp/bar.log"')
+
+
+def test_alerts_4():
+ roundtrip('alert alert4 "/also/a/path"')
+
+
+def test_alerts_5():
+ roundtrip('alert alert5 "/a/path" to { "/another/path" } meta timeout=30s')
+
+
+def test_alerts_6():
+ roundtrip('alert alert6 "/a/path" select fencing attributes { standby } to { "/another/path" } meta timeout=30s')
+
+
+def test_alerts_7():
+ roundtrip('alert alert7 "/a/path" select fencing attributes foo=bar to { "/another/path" } meta timeout=30s')
diff --git a/test/unittests/test_corosync.py b/test/unittests/test_corosync.py
new file mode 100644
index 0000000..2443f36
--- /dev/null
+++ b/test/unittests/test_corosync.py
@@ -0,0 +1,488 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+#
+# unit tests for corosync.py
+
+from builtins import str
+from builtins import object
+import os
+import unittest
+import pytest
+from unittest import mock
+from crmsh import corosync
+from crmsh.corosync import Parser, make_section, make_value
+
+
+def _read_conf(name):
+ # Read a fixture from the test directory without leaking the file handle.
+ with open(os.path.join(os.path.dirname(__file__), name)) as f:
+ return f.read()
+
+F1 = _read_conf('corosync.conf.1')
+F2 = _read_conf('corosync.conf.2')
+F3 = _read_conf('bug-862577_corosync.conf')
+F4 = _read_conf('corosync.conf.3')
+
+
+def _valid(parser):
+ depth = 0
+ for t in parser._tokens:
+ if t.token not in (corosync._tCOMMENT,
+ corosync._tBEGIN,
+ corosync._tEND,
+ corosync._tVALUE):
+ raise AssertionError("illegal token " + str(t))
+ if t.token == corosync._tBEGIN:
+ depth += 1
+ if t.token == corosync._tEND:
+ depth -= 1
+ if depth != 0:
+ raise AssertionError("Unbalanced sections")
+
+
+def _print(parser):
+ print(parser.to_string())
+
+
+def test_query_status_exception():
+ with pytest.raises(ValueError) as err:
+ corosync.query_status("test")
+ assert str(err.value) == "Wrong type \"test\" to query status"
+
+
+@mock.patch('crmsh.corosync.query_ring_status')
+def test_query_status(mock_ring_status):
+ corosync.query_status("ring")
+ mock_ring_status.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.is_qdevice_configured')
+def test_query_qdevice_status_exception(mock_configured):
+ mock_configured.return_value = False
+ with pytest.raises(ValueError) as err:
+ corosync.query_qdevice_status()
+ assert str(err.value) == "QDevice/QNetd not configured!"
+ mock_configured.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.print_cluster_nodes')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+@mock.patch('crmsh.utils.is_qdevice_configured')
+def test_query_qdevice_status(mock_configured, mock_run, mock_print):
+ mock_configured.return_value = True
+ corosync.query_qdevice_status()
+ mock_run.assert_called_once_with("corosync-qdevice-tool -sv")
+ mock_print.assert_called_once_with()
+
+
+@mock.patch("crmsh.corosync.query_ring_status")
+def test_query_status_ring(mock_ring_status):
+ corosync.query_status("ring")
+ mock_ring_status.assert_called_once_with()
+
+
+@mock.patch("crmsh.corosync.query_quorum_status")
+def test_query_status_quorum(mock_quorum_status):
+ corosync.query_status("quorum")
+ mock_quorum_status.assert_called_once_with()
+
+
+@mock.patch("crmsh.corosync.query_qnetd_status")
+def test_query_status_qnetd(mock_qnetd_status):
+ corosync.query_status("qnetd")
+ mock_qnetd_status.assert_called_once_with()
+
+
+def test_query_status_except():
+ with pytest.raises(ValueError) as err:
+ corosync.query_status("xxx")
+ assert str(err.value) == "Wrong type \"xxx\" to query status"
+
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_query_ring_status_except(mock_run):
+ mock_run.return_value = (1, None, "error")
+ with pytest.raises(ValueError) as err:
+ corosync.query_ring_status()
+ assert str(err.value) == "error"
+ mock_run.assert_called_once_with("corosync-cfgtool -s")
+
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_query_ring_status(mock_run):
+ mock_run.return_value = (0, "data", None)
+ corosync.query_ring_status()
+ mock_run.assert_called_once_with("corosync-cfgtool -s")
+
+
+@mock.patch("crmsh.utils.print_cluster_nodes")
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_query_quorum_status_except(mock_run, mock_print_nodes):
+ mock_run.return_value = (1, None, "error")
+ with pytest.raises(ValueError) as err:
+ corosync.query_quorum_status()
+ assert str(err.value) == "error"
+ mock_run.assert_called_once_with("corosync-quorumtool -s")
+ mock_print_nodes.assert_called_once_with()
+
+
+@mock.patch("crmsh.utils.print_cluster_nodes")
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_query_quorum_status(mock_run, mock_print_nodes):
+ mock_run.return_value = (0, "data", None)
+ corosync.query_quorum_status()
+ mock_run.assert_called_once_with("corosync-quorumtool -s")
+ mock_print_nodes.assert_called_once_with()
+
+
+@mock.patch("crmsh.utils.print_cluster_nodes")
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_query_quorum_status_no_quorum(mock_run, mock_print_nodes):
+ mock_run.return_value = (2, "no quorum", None)
+ corosync.query_quorum_status()
+ mock_run.assert_called_once_with("corosync-quorumtool -s")
+ mock_print_nodes.assert_called_once_with()
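+# The rc=2 case models corosync-quorumtool signalling lost quorum through its
+# exit code; query_quorum_status() is expected to accept that as valid output
+# rather than raise, unlike the rc=1 error case above.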
+
+
+@mock.patch("crmsh.utils.is_qdevice_configured")
+def test_query_qnetd_status_no_qdevice(mock_qdevice_configured):
+ mock_qdevice_configured.return_value = False
+ with pytest.raises(ValueError) as err:
+ corosync.query_qnetd_status()
+ assert str(err.value) == "QDevice/QNetd not configured!"
+ mock_qdevice_configured.assert_called_once_with()
+
+
+@mock.patch("crmsh.corosync.get_value")
+@mock.patch("crmsh.utils.is_qdevice_configured")
+def test_query_qnetd_status_no_cluster_name(mock_qdevice_configured, mock_get_value):
+ mock_qdevice_configured.return_value = True
+ mock_get_value.return_value = None
+ with pytest.raises(ValueError) as err:
+ corosync.query_qnetd_status()
+ assert str(err.value) == "cluster_name not configured!"
+ mock_qdevice_configured.assert_called_once_with()
+ mock_get_value.assert_called_once_with("totem.cluster_name")
+
+
+@mock.patch("crmsh.corosync.get_value")
+@mock.patch("crmsh.utils.is_qdevice_configured")
+def test_query_qnetd_status_no_host(mock_qdevice_configured, mock_get_value):
+ mock_qdevice_configured.return_value = True
+ mock_get_value.side_effect = ["hacluster", None]
+ with pytest.raises(ValueError) as err:
+ corosync.query_qnetd_status()
+ assert str(err.value) == "host for qnetd not configured!"
+ mock_qdevice_configured.assert_called_once_with()
+ mock_get_value.assert_has_calls([
+ mock.call("totem.cluster_name"),
+ mock.call("quorum.device.net.host")
+ ])
+
+
+@mock.patch('crmsh.utils.user_pair_for_ssh')
+@mock.patch("crmsh.parallax.parallax_call")
+@mock.patch("crmsh.utils.ssh_copy_id")
+@mock.patch('crmsh.bootstrap.configure_ssh_key')
+@mock.patch("crmsh.utils.check_ssh_passwd_need")
+@mock.patch("crmsh.corosync.get_value")
+@mock.patch("crmsh.utils.is_qdevice_configured")
+def test_query_qnetd_status_copy_id_failed(mock_qdevice_configured,
+ mock_get_value, mock_check_passwd, mock_config_ssh_key, mock_ssh_copy_id, mock_parallax_call, mock_user_pair_for_ssh):
+ mock_user_pair_for_ssh.return_value = "alice", "root"
+ mock_parallax_call.side_effect = ValueError("Failed on 10.10.10.123: foo")
+ mock_qdevice_configured.return_value = True
+ mock_get_value.side_effect = ["hacluster", "10.10.10.123"]
+ mock_check_passwd.return_value = True
+ with pytest.raises(ValueError) as err:
+ corosync.query_qnetd_status()
+ assert err.value.args[0] == "Failed on 10.10.10.123: foo"
+ mock_qdevice_configured.assert_called_once_with()
+ mock_get_value.assert_has_calls([
+ mock.call("totem.cluster_name"),
+ mock.call("quorum.device.net.host")
+ ])
+ mock_check_passwd.assert_called_once_with("alice", "root", "10.10.10.123")
+ mock_config_ssh_key.assert_called_once_with('alice')
+ mock_ssh_copy_id.assert_called_once_with('alice', 'root', '10.10.10.123')
+
+
+@mock.patch('crmsh.utils.user_pair_for_ssh')
+@mock.patch("crmsh.utils.print_cluster_nodes")
+@mock.patch("crmsh.parallax.parallax_call")
+@mock.patch("crmsh.utils.ssh_copy_id")
+@mock.patch('crmsh.bootstrap.configure_ssh_key')
+@mock.patch("crmsh.utils.check_ssh_passwd_need")
+@mock.patch("crmsh.corosync.get_value")
+@mock.patch("crmsh.utils.is_qdevice_configured")
+def test_query_qnetd_status_copy(mock_qdevice_configured, mock_get_value,
+ mock_check_passwd, mock_config_ssh_key, mock_ssh_copy_id, mock_parallax_call, mock_print_nodes,
+ mock_user_pair_for_ssh):
+ mock_user_pair_for_ssh.return_value = "alice", "root"
+ mock_qdevice_configured.return_value = True
+ mock_get_value.side_effect = ["hacluster", "10.10.10.123"]
+ mock_check_passwd.return_value = True
+ mock_parallax_call.return_value = [("node1", (0, "data", None)), ]
+
+ corosync.query_qnetd_status()
+
+ mock_qdevice_configured.assert_called_once_with()
+ mock_get_value.assert_has_calls([
+ mock.call("totem.cluster_name"),
+ mock.call("quorum.device.net.host")
+ ])
+ mock_check_passwd.assert_called_once_with("alice", "root", "10.10.10.123")
+ mock_config_ssh_key.assert_called_once_with('alice')
+ mock_ssh_copy_id.assert_called_once_with('alice', 'root', '10.10.10.123')
+ mock_parallax_call.assert_called_once_with(["10.10.10.123"], "corosync-qnetd-tool -lv -c hacluster")
+ mock_print_nodes.assert_called_once_with()
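+ # End-to-end expectation: after passwordless ssh is arranged, the qnetd
+ # host is queried remotely with "corosync-qnetd-tool -lv -c <cluster_name>".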
+
+
+@mock.patch('crmsh.utils.get_nodeinfo_from_cmaptool')
+@mock.patch('crmsh.corosync.add_node_ucast')
+def test_add_nodelist_from_cmaptool(mock_add_ucast, mock_nodeinfo):
+ mock_nodeinfo.return_value = {'1': ['10.10.10.1', '20.20.20.1'], '2': ['10.10.10.2', '20.20.20.2']}
+
+ corosync.add_nodelist_from_cmaptool()
+
+ mock_nodeinfo.assert_called_once_with()
+ mock_add_ucast.assert_has_calls([
+ mock.call(['10.10.10.1', '20.20.20.1'], '1'),
+ mock.call(['10.10.10.2', '20.20.20.2'], '2')
+ ])
+
+
+@mock.patch("crmsh.corosync.get_value")
+def test_is_unicast(mock_get_value):
+ mock_get_value.return_value = "udpu"
+ assert corosync.is_unicast() is True
+ mock_get_value.assert_called_once_with("totem.transport")
+
+
+@mock.patch('crmsh.corosync.get_corosync_value_dict')
+def test_token_and_consensus_timeout(mock_get_dict):
+ mock_get_dict.return_value = {"token": 10, "consensus": 12}
+ assert corosync.token_and_consensus_timeout() == 22
+
+
+@mock.patch('crmsh.corosync.get_corosync_value')
+def test_get_corosync_value_dict(mock_get_value):
+ mock_get_value.side_effect = ["10000", None]
+ res = corosync.get_corosync_value_dict()
+ assert res == {"token": 10, "consensus": 12}
+
+
+@mock.patch('crmsh.corosync.get_value')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_corosync_value_raise(mock_run, mock_get_value):
+ mock_run.side_effect = ValueError
+ mock_get_value.return_value = None
+ assert corosync.get_corosync_value("xxx") is None
+ mock_run.assert_called_once_with("corosync-cmapctl xxx")
+ mock_get_value.assert_called_once_with("xxx")
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_corosync_value(mock_run):
+ mock_run.return_value = "totem.token = 10000"
+ assert corosync.get_corosync_value("totem.token") == "10000"
+ mock_run.assert_called_once_with("corosync-cmapctl totem.token")
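+# The two tests above pin down get_corosync_value(): parse the "key = value"
+# line printed by corosync-cmapctl, and fall back to get_value() against the
+# on-disk configuration when the cmapctl query raises.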
+
+
+class TestCorosyncParser(unittest.TestCase):
+ def test_parse(self):
+ p = Parser(F1)
+ _valid(p)
+ self.assertEqual(p.get('logging.logfile'), '/var/log/cluster/corosync.log')
+ self.assertEqual(p.get('totem.interface.ttl'), '1')
+ p.set('totem.interface.ttl', '2')
+ _valid(p)
+ self.assertEqual(p.get('totem.interface.ttl'), '2')
+ p.remove('quorum')
+ _valid(p)
+ self.assertEqual(p.count('quorum'), 0)
+ p.add('', make_section('quorum', []))
+ _valid(p)
+ self.assertEqual(p.count('quorum'), 1)
+ p.set('quorum.votequorum', '2')
+ _valid(p)
+ self.assertEqual(p.get('quorum.votequorum'), '2')
+ p.set('bananas', '5')
+ _valid(p)
+ self.assertEqual(p.get('bananas'), '5')
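+ # A minimal sketch of the corosync.conf shape the dotted paths above
+ # address, assuming the usual section syntax:
+ #
+ #   totem {
+ #       interface {
+ #           ttl: 2
+ #       }
+ #   }
+ #   quorum {
+ #       votequorum: 2
+ #   }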
+
+ def test_udpu(self):
+ p = Parser(F2)
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), 5)
+ p.add('nodelist',
+ make_section('nodelist.node',
+ make_value('nodelist.node.ring0_addr', '10.10.10.10') +
+ make_value('nodelist.node.nodeid', str(corosync.get_free_nodeid(p)))))
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), 6)
+ self.assertEqual(p.get_all('nodelist.node.nodeid'),
+ ['1', '2', '3'])
+
+ def test_add_node_no_nodelist(self):
+ "test checks that if there is no nodelist, no node is added"
+ from crmsh.corosync import make_section, make_value, get_free_nodeid
+
+ p = Parser(F1)
+ _valid(p)
+ nid = get_free_nodeid(p)
+ self.assertEqual(p.count('nodelist.node'), nid - 1)
+ p.add('nodelist',
+ make_section('nodelist.node',
+ make_value('nodelist.node.ring0_addr', 'foo') +
+ make_value('nodelist.node.nodeid', str(nid))))
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), nid - 1)
+
+ @mock.patch("crmsh.utils.InterfacesInfo.get_local_ip_list")
+ @mock.patch("crmsh.utils.IP.is_ipv6")
+ @mock.patch("re.search")
+ @mock.patch("crmsh.corosync.Parser")
+ @mock.patch("crmsh.corosync.conf")
+ @mock.patch("crmsh.utils.read_from_file")
+ def test_find_configured_ip_no_exception(self, mock_read_file, mock_conf, mock_parser, mock_search, mock_isv6, mock_ip_local):
+ mock_conf.return_value = "/etc/corosync/corosync.conf"
+ mock_parser_inst = mock.Mock()
+ mock_parser.return_value = mock_parser_inst
+ mock_parser_inst.all_paths.return_value = ["nodelist.node.ring0_addr"]
+ mock_read_file.return_value = "data"
+ mock_search.return_value = mock.Mock()
+ mock_parser_inst.get_all.return_value = ["10.10.10.1"]
+ mock_isv6.return_value = False
+ mock_ip_local.return_value = ["192.168.1.1", "10.10.10.2", "20.20.20.2"]
+
+ corosync.find_configured_ip(["10.10.10.2"])
+
+ mock_conf.assert_called_once_with()
+ mock_parser.assert_called_once_with("data")
+ mock_parser_inst.all_paths.assert_called_once_with()
+ mock_parser_inst.get_all.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_isv6.assert_called_once_with("10.10.10.2")
+ mock_ip_local.assert_called_once_with(False)
+ mock_search.assert_called_once_with("nodelist.node.ring[0-9]*_addr", "nodelist.node.ring0_addr")
+
+ @mock.patch("crmsh.utils.InterfacesInfo.get_local_ip_list")
+ @mock.patch("crmsh.utils.IP.is_ipv6")
+ @mock.patch("re.search")
+ @mock.patch("crmsh.corosync.Parser")
+ @mock.patch("crmsh.corosync.conf")
+ @mock.patch("crmsh.utils.read_from_file")
+ def test_find_configured_ip_exception(self, mock_read_file, mock_conf, mock_parser, mock_search, mock_isv6, mock_ip_local):
+ mock_conf.return_value = "/etc/corosync/corosync.conf"
+ mock_parser_inst = mock.Mock()
+ mock_parser.return_value = mock_parser_inst
+ mock_parser_inst.all_paths.return_value = ["nodelist.node.ring0_addr"]
+ mock_read_file.return_value = "data"
+ mock_search.return_value = mock.Mock()
+ mock_parser_inst.get_all.return_value = ["10.10.10.1", "10.10.10.2"]
+ mock_isv6.return_value = False
+ mock_ip_local.return_value = ["192.168.1.1", "10.10.10.2", "20.20.20.2"]
+
+ with self.assertRaises(corosync.IPAlreadyConfiguredError) as err:
+ corosync.find_configured_ip(["10.10.10.2"])
+ self.assertEqual("IP 10.10.10.2 was already configured", str(err.exception))
+
+ mock_conf.assert_called_once_with()
+ mock_parser.assert_called_once_with("data")
+ mock_parser_inst.all_paths.assert_called_once_with()
+ mock_parser_inst.get_all.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_isv6.assert_called_once_with("10.10.10.2")
+ mock_ip_local.assert_called_once_with(False)
+ # assert_called_once_with is unreliable here (re.search can be hit more
+ # than once on this error path), so just check the expected call occurred.
+ mock_search.assert_has_calls([mock.call("nodelist.node.ring[0-9]*_addr", "nodelist.node.ring0_addr")])
+
+ @mock.patch("crmsh.utils.str2file")
+ @mock.patch("crmsh.corosync.make_section")
+ @mock.patch("crmsh.corosync.get_values")
+ @mock.patch("crmsh.corosync.make_value")
+ @mock.patch("crmsh.corosync.get_free_nodeid")
+ @mock.patch("crmsh.corosync.Parser")
+ @mock.patch("crmsh.utils.read_from_file")
+ @mock.patch("crmsh.corosync.conf")
+ @mock.patch("crmsh.corosync.find_configured_ip")
+ def test_add_node_ucast(self, mock_find_ip, mock_conf, mock_read_file, mock_parser,
+ mock_free_id, mock_make_value, mock_get_values, mock_make_section, mock_str2file):
+ mock_parser_inst = mock.Mock()
+ mock_conf.side_effect = ["corosync.conf", "corosync.conf"]
+ mock_read_file.return_value = "data"
+ mock_parser.return_value = mock_parser_inst
+ mock_free_id.return_value = 2
+ mock_make_value.side_effect = [["value1"], ["value2"]]
+ mock_get_values.return_value = []
+ mock_make_section.side_effect = ["section1", "section2"]
+ mock_parser_inst.count.return_value = 2
+ mock_parser_inst.get.return_value = "net"
+ mock_parser_inst.to_string.return_value = "string data"
+
+ corosync.add_node_ucast(['10.10.10.1'])
+
+ mock_find_ip.assert_called_once_with(['10.10.10.1'])
+ mock_parser.assert_called_once_with("data")
+ mock_free_id.assert_called_once_with(mock_parser_inst)
+ mock_make_value.assert_has_calls([
+ mock.call('nodelist.node.ring0_addr', '10.10.10.1'),
+ mock.call('nodelist.node.nodeid', '2')
+ ])
+ mock_get_values.assert_called_once_with("nodelist.node.ring0_addr")
+ mock_make_section.assert_has_calls([
+ mock.call('nodelist', []),
+ mock.call('nodelist.node', ["value1", "value2"])
+ ])
+ mock_parser_inst.add.assert_has_calls([
+ mock.call('', 'section1'),
+ mock.call('nodelist', 'section2')
+ ])
+ mock_parser_inst.count.assert_called_once_with("nodelist.node")
+ mock_parser_inst.set.assert_has_calls([
+ mock.call('quorum.two_node', '1'),
+ mock.call('quorum.two_node', '0')
+ ])
+ mock_parser_inst.get.assert_called_once_with('quorum.device.model')
+ mock_parser_inst.to_string.assert_called_once_with()
+ mock_str2file.assert_called_once_with("string data", "corosync.conf")
+
+ def test_add_node_nodelist(self):
+ from crmsh.corosync import make_section, make_value, get_free_nodeid
+
+ p = Parser(F2)
+ _valid(p)
+ nid = get_free_nodeid(p)
+ c = p.count('nodelist.node')
+ p.add('nodelist',
+ make_section('nodelist.node',
+ make_value('nodelist.node.ring0_addr', 'foo') +
+ make_value('nodelist.node.nodeid', str(nid))))
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), c + 1)
+ self.assertEqual(get_free_nodeid(p), nid + 1)
+
+ def test_remove_node(self):
+ p = Parser(F2)
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), 5)
+ p.remove_section_where('nodelist.node', 'nodeid', '2')
+ _valid(p)
+ self.assertEqual(p.count('nodelist.node'), 4)
+ self.assertEqual(p.get_all('nodelist.node.nodeid'),
+ ['1'])
+
+ def test_bnc862577(self):
+ p = Parser(F3)
+ _valid(p)
+ self.assertEqual(p.count('service.ver'), 1)
+
+ def test_get_free_nodeid(self):
+ def ids(*lst):
+ class Ids(object):
+ def get_all(self, _arg):
+ return lst
+ return Ids()
+ self.assertEqual(1, corosync.get_free_nodeid(ids('2', '5')))
+ self.assertEqual(3, corosync.get_free_nodeid(ids('1', '2', '5')))
+ self.assertEqual(4, corosync.get_free_nodeid(ids('1', '2', '3')))
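+ # These cases pin down the semantics: the lowest positive nodeid not yet
+ # in use is returned, so gaps are filled before appending at the end.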
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unittests/test_crashtest_check.py b/test/unittests/test_crashtest_check.py
new file mode 100644
index 0000000..deb1ca5
--- /dev/null
+++ b/test/unittests/test_crashtest_check.py
@@ -0,0 +1,790 @@
+import os
+import sys
+
+try:
+ from unittest import mock, TestCase
+except ImportError:
+ # Python 2 fallback: mock is a separate package, TestCase is still in unittest.
+ import mock
+ from unittest import TestCase
+
+from crmsh import utils as crmshutils
+from crmsh.crash_test import check, config
+
+
+class TestCheck(TestCase):
+
+ @mock.patch('crmsh.crash_test.check.check_cluster')
+ def test_check(self, mock_cluster_check):
+ ctx = mock.Mock(cluster_check=True)
+ check.check(ctx)
+ mock_cluster_check.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.check.check_firewall')
+ @mock.patch('crmsh.crash_test.check.check_time_service')
+ @mock.patch('crmsh.crash_test.check.check_my_hostname_resolves')
+ def test_check_environment(self, mock_hostname, mock_time, mock_firewall):
+ check.check_environment()
+ mock_hostname.assert_called_once_with()
+ mock_time.assert_called_once_with()
+ mock_firewall.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ @mock.patch('crmsh.crash_test.check.crmshboot.my_hostname_resolves')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_my_hostname_resolves(self, mock_task_check, mock_hostname, mock_this_node):
+ mock_task_inst = mock.Mock()
+ mock_task_check.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
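+ # TaskCheck.run() is evidently used as a context manager in check.py, so
+ # the mocked run() result needs __enter__/__exit__ stubs for the "with"
+ # block to work. The same stubbing recurs throughout this file.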
+ mock_hostname.return_value = False
+ mock_this_node.return_value = "node1"
+
+ check.check_my_hostname_resolves()
+
+ mock_task_check.assert_called_once_with("Checking hostname resolvable")
+ mock_hostname.assert_called_once_with()
+ mock_task_inst.error.assert_called_once_with('Hostname "node1" is unresolvable.\n Please add an entry to /etc/hosts or configure DNS.')
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_available')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_time_service_none(self, mock_task, mock_service_available, mock_service_enabled, mock_service_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_service_available.side_effect = [False, False, False]
+
+ check.check_time_service()
+
+ mock_task.assert_called_once_with("Checking time service")
+ mock_service_available.assert_has_calls([
+ mock.call('chronyd.service'),
+ mock.call('ntp.service'),
+ mock.call('ntpd.service')
+ ])
+ mock_task_inst.warn.assert_called_once_with("No NTP service found.")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_available')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_time_service_warn(self, mock_task, mock_service_available, mock_service_enabled, mock_service_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_service_available.return_value = True
+ mock_service_enabled.return_value = False
+ mock_service_active.return_value = False
+
+ check.check_time_service()
+
+ mock_task.assert_called_once_with("Checking time service")
+ mock_service_available.assert_called_once_with("chronyd.service")
+ mock_task_inst.info.assert_called_once_with("chronyd.service is available")
+ mock_task_inst.warn.assert_has_calls([
+ mock.call("chronyd.service is disabled"),
+ mock.call("chronyd.service is not active"),
+ ])
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_available')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_time_service(self, mock_task, mock_service_available, mock_service_enabled, mock_service_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_service_available.return_value = True
+ mock_service_enabled.return_value = True
+ mock_service_active.return_value = True
+
+ check.check_time_service()
+
+ mock_task.assert_called_once_with("Checking time service")
+ mock_service_available.assert_called_once_with("chronyd.service")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("chronyd.service is available"),
+ mock.call("chronyd.service is enabled"),
+ mock.call("chronyd.service is active")
+ ])
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.utils.corosync_port_list')
+ def test_check_port_open_return(self, mock_corosync_port, mock_run):
+ mock_corosync_port.return_value = ["1234", "5678"]
+ mock_run.return_value = (1, None, "error")
+ task_inst = mock.Mock()
+
+ check.check_port_open(task_inst, "firewalld")
+
+ mock_corosync_port.assert_called_once_with()
+ task_inst.error.assert_called_once_with("error")
+ mock_run.assert_called_once_with("firewall-cmd --list-port")
+
+ @mock.patch('crmsh.crash_test.utils.corosync_port_list')
+ def test_check_port_open_fail_to_get_port(self, mock_corosync_port):
+ mock_corosync_port.return_value = []
+ task_inst = mock.Mock()
+
+ check.check_port_open(task_inst, "firewalld")
+
+ mock_corosync_port.assert_called_once_with()
+ task_inst.error.assert_called_once_with("Can not get corosync's port")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.utils.corosync_port_list')
+ def test_check_port_open(self, mock_corosync_port, mock_run):
+ mock_corosync_port.return_value = ["1234", "5678"]
+ output_cmd = """
+ 1234/udp
+ 4444/tcp
+ """
+ mock_run.return_value = (0, output_cmd, None)
+ task_inst = mock.Mock()
+
+ check.check_port_open(task_inst, "firewalld")
+
+ mock_corosync_port.assert_called_once_with()
+ task_inst.error.assert_called_once_with("UDP port 5678 should open in firewalld")
+ mock_run.assert_called_once_with("firewall-cmd --list-port")
+ task_inst.info.assert_called_once_with("UDP port 1234 is opened in firewalld")
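+ # check_port_open() is expected to scan the "PORT/PROTO" tokens printed by
+ # "firewall-cmd --list-port" and flag any corosync UDP port missing there.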
+
+ @mock.patch('crmsh.crash_test.check.crmshutils.package_is_installed')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_firewall_not_installed(self, mock_task, mock_installed):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_installed.side_effect = [False, False]
+
+ check.check_firewall()
+
+ mock_task.assert_called_once_with("Checking firewall")
+ mock_installed.assert_has_calls([
+ mock.call("firewalld"),
+ mock.call("SuSEfirewall2")
+ ])
+ mock_task_inst.warn.assert_called_once_with("Failed to detect firewall")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.crash_test.check.crmshutils.package_is_installed')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_firewall_warn(self, mock_task, mock_installed, mock_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_installed.return_value = True
+ mock_active.return_value = False
+
+ check.check_firewall()
+
+ mock_task.assert_called_once_with("Checking firewall")
+ mock_installed.assert_called_once_with("firewalld")
+ mock_task_inst.info.assert_called_once_with("firewalld.service is available")
+ mock_task_inst.warn.assert_called_once_with("firewalld.service is not active")
+
+ @mock.patch('crmsh.crash_test.check.check_port_open')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.crash_test.check.crmshutils.package_is_installed')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_firewall(self, mock_task, mock_installed, mock_active, mock_check_port):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_installed.return_value = True
+ mock_active.return_value = True
+
+ check.check_firewall()
+
+ mock_task.assert_called_once_with("Checking firewall")
+ mock_installed.assert_called_once_with("firewalld")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("firewalld.service is available"),
+ mock.call("firewalld.service is active")
+ ])
+ mock_active.assert_called_once_with("firewalld")
+ mock_check_port.assert_called_once_with(mock_task_inst, "firewalld")
+
+ @mock.patch('crmsh.crash_test.check.check_cluster_service')
+ def test_check_cluster_return(self, mock_check_cluster):
+ mock_check_cluster.return_value = False
+ check.check_cluster()
+ mock_check_cluster.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.check.check_resources')
+ @mock.patch('crmsh.crash_test.check.check_nodes')
+ @mock.patch('crmsh.crash_test.check.check_fencing')
+ @mock.patch('crmsh.crash_test.check.check_cluster_service')
+ def test_check_cluster(self, mock_check_cluster, mock_check_fencing, mock_check_nodes, mock_check_resources):
+ mock_check_cluster.return_value = True
+ check.check_cluster()
+ mock_check_cluster.assert_called_once_with()
+ mock_check_fencing.assert_called_once_with()
+ mock_check_nodes.assert_called_once_with()
+ mock_check_resources.assert_called_once_with()
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_cluster_service_pacemaker_disable(self, mock_task, mock_enabled, mock_active):
+ mock_task_inst = mock.Mock(passed=False)
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_enabled.side_effect = [False, True]
+ mock_active.side_effect = [True, False]
+
+ res = check.check_cluster_service()
+ self.assertEqual(res, False)
+
+ mock_task.assert_called_once_with("Checking cluster service", quiet=False)
+ mock_enabled.assert_has_calls([
+ mock.call("pacemaker"),
+ mock.call("corosync")
+ ])
+ mock_task_inst.warn.assert_has_calls([
+ mock.call("pacemaker.service is disabled"),
+ mock.call("corosync.service is enabled")
+ ])
+ mock_active.assert_has_calls([
+ mock.call("corosync"),
+ mock.call("pacemaker")
+ ])
+ mock_task_inst.info.assert_called_once_with("corosync.service is running")
+ mock_task_inst.error.assert_called_once_with("pacemaker.service is not running!")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_cluster_service(self, mock_task, mock_enabled, mock_active):
+ mock_task_inst = mock.Mock(passed=True)
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_enabled.side_effect = [True, True]
+ mock_active.side_effect = [True, True]
+
+ res = check.check_cluster_service()
+ self.assertEqual(res, True)
+
+ mock_task.assert_called_once_with("Checking cluster service", quiet=False)
+ mock_enabled.assert_has_calls([
+ mock.call("pacemaker"),
+ mock.call("corosync")
+ ])
+ mock_active.assert_has_calls([
+ mock.call("corosync"),
+ mock.call("pacemaker")
+ ])
+ mock_task_inst.info.assert_has_calls([
+ mock.call("pacemaker.service is enabled"),
+ mock.call("corosync.service is running"),
+ mock.call("pacemaker.service is running")
+ ])
+ mock_task_inst.warn.assert_called_once_with("corosync.service is enabled")
+
+ @mock.patch('crmsh.crash_test.utils.FenceInfo')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_fencing_no_stonith(self, mock_task, mock_fence_info):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_fence_info_inst = mock.Mock(fence_enabled=False)
+ mock_fence_info.return_value = mock_fence_info_inst
+
+ check.check_fencing()
+
+ mock_task.assert_called_once_with("Checking STONITH/Fence")
+ mock_fence_info.assert_called_once_with()
+ mock_task_inst.warn.assert_called_once_with("stonith is disabled")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.utils.FenceInfo')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_fencing_no_resources(self, mock_task, mock_fence_info, mock_run):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_fence_info_inst = mock.Mock(fence_enabled=True)
+ mock_fence_info.return_value = mock_fence_info_inst
+ mock_run.return_value = (1, None, None)
+
+ check.check_fencing()
+
+ mock_task.assert_called_once_with("Checking STONITH/Fence")
+ mock_fence_info.assert_called_once_with()
+ mock_run.assert_called_once_with("crm_mon -r1 | grep '(stonith:.*):'")
+ mock_task_inst.info.assert_called_once_with("stonith is enabled")
+ mock_task_inst.warn.assert_called_once_with("No stonith resource configured!")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.utils.FenceInfo')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_fencing_has_warn(self, mock_task, mock_fence_info, mock_run, mock_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_fence_info_inst = mock.Mock(fence_enabled=True)
+ mock_fence_info.return_value = mock_fence_info_inst
+ mock_run.return_value = (0, "* stonith-sbd (stonith:external/sbd): Stopped (disabled)", None)
+ mock_active.return_value = False
+
+ check.check_fencing()
+
+ mock_task.assert_called_once_with("Checking STONITH/Fence")
+ mock_fence_info.assert_called_once_with()
+ mock_run.assert_called_once_with("crm_mon -r1 | grep '(stonith:.*):'")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("stonith is enabled"),
+ mock.call("stonith resource stonith-sbd(external/sbd) is configured")
+ ])
+ mock_task_inst.warn.assert_has_calls([
+ mock.call("stonith resource stonith-sbd(external/sbd) is Stopped"),
+ mock.call("sbd service is not running!")
+ ])
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.utils.FenceInfo')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_fencing(self, mock_task, mock_fence_info, mock_run, mock_active):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_fence_info_inst = mock.Mock(fence_enabled=True)
+ mock_fence_info.return_value = mock_fence_info_inst
+ mock_run.return_value = (0, "* stonith-sbd (stonith:external/sbd): Started node2", None)
+ mock_active.return_value = True
+
+ check.check_fencing()
+
+ mock_task.assert_called_once_with("Checking STONITH/Fence")
+ mock_fence_info.assert_called_once_with()
+ mock_run.assert_called_once_with("crm_mon -r1 | grep '(stonith:.*):'")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("stonith is enabled"),
+ mock.call("stonith resource stonith-sbd(external/sbd) is configured"),
+ mock.call("stonith resource stonith-sbd(external/sbd) is Started"),
+ mock.call("sbd service is running")
+ ])
+ mock_active.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_nodes_error(self, mock_task, mock_run):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_run.return_value = (1, None, "error data")
+
+ check.check_nodes()
+
+ mock_task.assert_called_once_with("Checking nodes")
+ mock_run.assert_called_once_with("crm_mon -1")
+ mock_task_inst.error.assert_called_once_with("run \"crm_mon -1\" error: error data")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_nodes(self, mock_task, mock_run):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ output = """
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: 15sp2-1 (version 2.0.3+20200511.2b248d828-1.10-2.0.3+20200511.2b248d828) - partition with quorum
+ * Last updated: Tue Nov 3 14:09:29 2020
+ * Last change: Tue Nov 3 13:47:29 2020 by root via cibadmin on 15sp2-1
+ * 2 nodes configured
+ * 1 resource instance configured (1 DISABLED)
+
+Node List:
+ * Online: [ 15sp2-1 ]
+ * OFFLINE: [ 15sp2-2 ]
+ """
+ mock_run.return_value = (0, output, None)
+
+ check.check_nodes()
+
+ mock_task.assert_called_once_with("Checking nodes")
+ mock_run.assert_called_once_with("crm_mon -1")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("DC node: 15sp2-1"),
+ mock.call("Cluster have quorum"),
+ mock.call("Online nodes: [ 15sp2-1 ]")
+ ])
+ mock_task_inst.warn.assert_called_once_with("OFFLINE nodes: [ 15sp2-2 ]")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_nodes_warn(self, mock_task, mock_run):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ output = """
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: NONE
+ * Last updated: Tue Nov 3 14:16:49 2020
+ * Last change: Tue Nov 3 14:09:29 2020 by root via cibadmin on 15sp2-1
+ * 2 nodes configured
+ * 1 resource instance configured (1 DISABLED)
+
+Node List:
+ * Node 15sp2-1: UNCLEAN (offline)
+ * Node 15sp2-2: UNCLEAN (offline)
+
+Active Resources:
+ * No active resources
+ """
+ mock_run.return_value = (0, output, None)
+
+ check.check_nodes()
+
+ mock_task.assert_called_once_with("Checking nodes")
+ mock_run.assert_called_once_with("crm_mon -1")
+ mock_task_inst.warn.assert_has_calls([
+ mock.call("Cluster lost quorum!"),
+ mock.call("Node 15sp2-1 is UNCLEAN!"),
+ mock.call("Node 15sp2-2 is UNCLEAN!")
+ ])
+
+ @mock.patch('crmsh.crash_test.check.completers.resources_stopped')
+ @mock.patch('crmsh.crash_test.check.completers.resources_started')
+ @mock.patch('crmsh.crash_test.task.TaskCheck')
+ def test_check_resources(self, mock_task, mock_started, mock_stopped):
+ mock_task_inst = mock.Mock()
+ mock_task.return_value = mock_task_inst
+ mock_task_inst.run.return_value.__enter__ = mock.Mock()
+ mock_task_inst.run.return_value.__exit__ = mock.Mock()
+ mock_started.return_value = ["r1", "r2"]
+ mock_stopped.return_value = ["r3", "r4"]
+
+ check.check_resources()
+
+ mock_task.assert_called_once_with("Checking resources")
+ mock_task_inst.info.assert_has_calls([
+ mock.call("Started resources: r1,r2"),
+ mock.call("Stopped resources: r3,r4")
+ ])
+
+ # Test fix()
+ @classmethod
+ @mock.patch('crmsh.crash_test.check.correct_sbd')
+ @mock.patch('crmsh.crash_test.check.check_sbd')
+ def test_fix_with_candidate(cls, mock_check_sbd, mock_correct_sbd):
+ """
+ Test fix() corrects SBD when check_sbd() returns a candidate device
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ ctx = mock.Mock(fix_conf=True)
+ mock_check_sbd.return_value = dev
+ check.fix(ctx)
+ mock_correct_sbd.assert_called_once_with(ctx, dev)
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.check.correct_sbd')
+ @mock.patch('crmsh.crash_test.check.check_sbd')
+ def test_fix_without_candidate(cls, mock_check_sbd, mock_correct_sbd):
+ """
+ Test fix() skips correction when check_sbd() returns no candidate
+ """
+ ctx = mock.Mock(fix_conf=True)
+ mock_check_sbd.return_value = ""
+ mock_correct_sbd.return_value = ""
+ check.fix(ctx)
+ mock_correct_sbd.assert_not_called()
+
+ # Test check_sbd()
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_no_conf(cls, mock_os_path_exists,
+ mock_utils_msg_info, mock_run):
+ """
+ Test no configuration file
+ """
+ mock_os_path_exists.return_value = False
+ check.check_sbd()
+ mock_utils_msg_info.assert_called_with("SBD configuration file {} not found.".
+ format(config.SBD_CONF), to_stdout=False)
+ mock_run.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_not_configured(cls, mock_os_path_exists, mock_utils_parse_sysconf,
+ mock_utils_msg_info, mock_run):
+ """
+ Test SBD device not configured
+ """
+ mock_os_path_exists.return_value = True
+ mock_utils_parse_sysconf.return_value = {}
+ check.check_sbd()
+ mock_utils_msg_info.assert_called_with("SBD DEVICE not used.", to_stdout=False)
+ mock_run.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_exist_and_valid(cls, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_find_hexdump,
+ mock_msg_info, mock_is_valid_sbd, mock_run):
+ """
+ Test configured SBD device exist and valid
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ mock_os_path_exists.side_effect = [True, True, True]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_find_hexdump.return_value = (0, "/usr/bin/hexdump", None)
+ mock_is_valid_sbd.return_value = True
+
+ check.check_sbd()
+ mock_msg_info.assert_called_with("'{}' is a valid SBD device.".format(dev),
+ to_stdout=False)
+ mock_run.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.find_candidate_sbd')
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('crmsh.crash_test.utils.msg_warn')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_exist_and_not_valid_but_no_can(cls, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_find_hexdump,
+ mock_msg_warn, mock_is_valid_sbd,
+ mock_find_can_sbd, mock_run):
+ """
+ Test configured SBD device not valid and no candidate
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ mock_os_path_exists.side_effect = [True, True, True]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_find_hexdump.return_value = (0, "/usr/bin/hexdump", None)
+ mock_is_valid_sbd.return_value = False
+ mock_find_can_sbd.return_value = ""
+
+ check.check_sbd()
+ mock_msg_warn.assert_has_calls(
+ [mock.call("Device '{}' is not valid for SBD, may need initialize.".
+ format(dev), to_stdout=False),
+ mock.call("Fail to find a valid candidate SBD device.",
+ to_stdout=False)])
+ mock_run.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.find_candidate_sbd')
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.crash_test.utils.msg_warn')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_exist_and_not_exist_has_can(cls, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_find_hexdump,
+ mock_msg_warn, mock_msg_info, mock_is_valid_sbd,
+ mock_find_can_sbd, mock_run):
+ """
+ Test configured SBD device not valid but has candidate
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ candev = "/dev/disk/by-id/scsi-SATA_ST2037LM010-2R82_WDZ5J36B"
+ mock_os_path_exists.side_effect = [True, False]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_find_hexdump.return_value = (0, "/usr/bin/hexdump", None)
+ mock_is_valid_sbd.return_value = False
+ mock_find_can_sbd.return_value = candev
+
+ check.check_sbd()
+ mock_msg_warn.assert_called_once_with(
+ "SBD device '{}' is not exist.".format(dev),
+ to_stdout=False)
+ mock_msg_info.assert_called_with("Found '{}' with SBD header exist.".format(candev),
+ to_stdout=False)
+ mock_run.assert_called_once_with()
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.task.TaskCheck.print_result')
+ @mock.patch('crmsh.crash_test.utils.find_candidate_sbd')
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.crash_test.utils.msg_warn')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ def test_check_sbd_exist_and_not_valid_has_can(cls, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_find_hexdump,
+ mock_msg_warn, mock_msg_info, mock_is_valid_sbd,
+ mock_find_can_sbd, mock_run):
+ """
+ Test configured SBD device not valid but has candidate
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ candev = "/dev/disk/by-id/scsi-SATA_ST2037LM010-2R82_WDZ5J36B"
+ mock_os_path_exists.side_effect = [True, True, True]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_find_hexdump.return_value = (0, "/usr/bin/hexdump", None)
+ mock_is_valid_sbd.return_value = False
+ mock_find_can_sbd.return_value = candev
+
+ check.check_sbd()
+ mock_msg_warn.assert_called_once_with(
+ "Device '{}' is not valid for SBD, may need initialize.".format(dev),
+ to_stdout=False)
+ mock_msg_info.assert_called_with("Found '{}' with SBD header exist.".format(candev),
+ to_stdout=False)
+ mock_run.assert_called_once_with()
+
+ # Test correct_sbd()
+ @mock.patch('crmsh.crash_test.task.Task.error')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.main.Context')
+ def test_correct_sbd_exception_no_conf(self, mock_context, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_msg_info,
+ mock_error):
+ """
+ Test correct_sbd with exception
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ mock_context = mock.Mock(yes=True)
+ mock_os_path_exists.side_effect = [False, True]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ check.correct_sbd(mock_context, dev)
+
+ mock_msg_info.assert_called_once_with('Replace SBD_DEVICE with candidate {}'.
+ format(dev), to_stdout=False)
+ mock_error.assert_called_once_with('Configure file {} not exist!'.
+ format(config.SBD_CONF))
+
+ @mock.patch('crmsh.crash_test.task.Task.error')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.main.Context')
+ def test_correct_sbd_exception_no_dev(self, mock_context, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_msg_info,
+ mock_error):
+ """
+ Test correct_sbd with exception
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ mock_context = mock.Mock(yes=True)
+ mock_os_path_exists.side_effect = [True, False]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ check.correct_sbd(mock_context, dev)
+
+ mock_msg_info.assert_called_once_with('Replace SBD_DEVICE with candidate {}'.
+ format(dev), to_stdout=False)
+ mock_error.assert_called_once_with('Device {} not exist!'.format(dev))
+
+ @classmethod
+ @mock.patch('builtins.open')
+ @mock.patch('crmsh.crash_test.task.TaskFixSBD.verify')
+ @mock.patch('tempfile.mkstemp')
+ @mock.patch('os.remove')
+ @mock.patch('shutil.move')
+ @mock.patch('shutil.copymode')
+ @mock.patch('shutil.copyfile')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.main.Context')
+ def test_correct_sbd(cls, mock_context, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_msg_info, mock_copyfile,
+ mock_copymode, mock_move, mock_remove,
+ mock_mkstemp, mock_sbd_verify, mock_open):
+ """
+ Test correct_sbd
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ bak = "/tmp/tmpmby3ty9g"
+ edit = "/tmp/tmpnic4t30s"
+ mock_context.return_value = mock.Mock(yes=True)
+ mock_os_path_exists.side_effect = [True, True]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_open.side_effect = [
+ mock.mock_open(read_data="data1").return_value,
+ mock.mock_open(read_data="SBD_DEVICE={}".format(dev)).return_value
+ ]
+ mock_mkstemp.side_effect = [(1, bak), (2, edit)]
+
+ check.correct_sbd(mock_context, dev)
+
+ mock_msg_info.assert_called_once_with('Replace SBD_DEVICE with candidate {}'.
+ format(dev), to_stdout=False)
+ mock_copyfile.assert_called_once_with(config.SBD_CONF, bak)
+ mock_copymode.assert_called_once_with(config.SBD_CONF, edit)
+ mock_move.assert_called_once_with(edit, config.SBD_CONF)
+ mock_remove.assert_called()
+ mock_sbd_verify.assert_called_once_with()
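+ # The assertions above trace the expected edit flow: back up SBD_CONF to a
+ # mkstemp() file, write the edited copy, carry over the file mode, move it
+ # into place, then remove the temporaries and verify the new device.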
+
+ @classmethod
+ @mock.patch('builtins.open')
+ @mock.patch('crmsh.crash_test.task.Task.error')
+ @mock.patch('tempfile.mkstemp')
+ @mock.patch('shutil.copymode')
+ @mock.patch('shutil.copyfile')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.main.Context')
+ def test_correct_sbd_run_exception(cls, mock_context, mock_os_path_exists,
+ mock_utils_parse_sysconf, mock_msg_info, mock_copyfile,
+ mock_copymode, mock_mkstemp, mock_msg_error,
+ mock_open):
+ """
+ Test correct_sbd
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ bak = "/tmp/tmpmby3ty9g"
+ edit = "/tmp/tmpnic4t30s"
+ mock_context.return_value = mock.Mock(yes=True)
+ mock_os_path_exists.side_effect = [True, True]
+ mock_utils_parse_sysconf.return_value = {"SBD_DEVICE": dev}
+ mock_open.side_effect = [
+ mock.mock_open(read_data="data1").return_value,
+ mock.mock_open(read_data="data2").return_value
+ ]
+ mock_mkstemp.side_effect = [(1, bak), (2, edit)]
+ mock_copymode.side_effect = Exception('Copy file error!')
+
+ with cls.assertRaises(cls, crmshutils.TerminateSubCommand):
+ check.correct_sbd(mock_context, dev)
+
+ mock_msg_info.assert_called_once_with('Replace SBD_DEVICE with candidate {}'.
+ format(dev), to_stdout=False)
+ mock_copyfile.assert_has_calls([mock.call(config.SBD_CONF, bak),
+ mock.call(bak, config.SBD_CONF)])
+ mock_copymode.assert_called_once_with(config.SBD_CONF, edit)
+ mock_msg_error.assert_called_once_with('Fail to modify file {}'.
+ format(config.SBD_CONF))
diff --git a/test/unittests/test_crashtest_main.py b/test/unittests/test_crashtest_main.py
new file mode 100644
index 0000000..02ae7b3
--- /dev/null
+++ b/test/unittests/test_crashtest_main.py
@@ -0,0 +1,215 @@
+import os
+import sys
+
+try:
+ from unittest import mock, TestCase
+except ImportError:
+ # Python 2 fallback: mock is a separate package, TestCase is still in unittest.
+ import mock
+ from unittest import TestCase
+
+from crmsh import utils as crmshutils
+from crmsh.crash_test import utils, main, config, task
+
+
+class TestContext(TestCase):
+
+ def test_context(self):
+ main.ctx.name = "xin"
+ self.assertEqual(main.ctx.name, "xin")
+
+
+class TestMain(TestCase):
+
+ @mock.patch('crmsh.crash_test.main.MyArgParseFormatter')
+ @mock.patch('argparse.ArgumentParser')
+ def test_parse_argument_help(self, mock_parser, mock_myformatter):
+ mock_parser_inst = mock.Mock()
+ mock_parser.return_value = mock_parser_inst
+ ctx = mock.Mock(process_name="crash_test", logfile="logfile1",
+ jsonfile="jsonfile1", report_path="/var/log/report")
+ mock_parse_args_inst = mock.Mock(help=True)
+ mock_parser_inst.parse_args.return_value = mock_parse_args_inst
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ main.parse_argument(ctx)
+
+ mock_parser_inst.print_help.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.main.MyArgParseFormatter')
+ @mock.patch('argparse.ArgumentParser')
+ def test_parse_argument(self, mock_parser, mock_myformatter):
+ mock_parser_inst = mock.Mock()
+ mock_parser.return_value = mock_parser_inst
+ ctx = mock.Mock(process_name="crash_test", logfile="logfile1",
+ jsonfile="jsonfile1", report_path="/var/log/report")
+ mock_parse_args_inst = mock.Mock(help=False, env_check=True, sbd=True)
+ mock_parser_inst.parse_args.return_value = mock_parse_args_inst
+
+ main.parse_argument(ctx)
+ self.assertEqual(ctx.env_check, True)
+ self.assertEqual(ctx.sbd, True)
+
+ mock_parser_inst.print_help.assert_not_called()
+
+ def test_setup_basic_context(self):
+ ctx = mock.Mock(process_name="crash_test")
+ main.setup_basic_context(ctx)
+ self.assertEqual(ctx.var_dir, "/var/lib/crmsh/crash_test")
+ self.assertEqual(ctx.report_path, "/var/lib/crmsh/crash_test")
+ self.assertEqual(ctx.jsonfile, "/var/lib/crmsh/crash_test/crash_test.json")
+ self.assertEqual(ctx.logfile, "/var/log/crmsh/crmsh.log")
+
+ @mock.patch('logging.Logger.fatal')
+ @mock.patch('crmsh.crash_test.utils.is_root')
+ @mock.patch('crmsh.crash_test.main.parse_argument')
+ @mock.patch('crmsh.crash_test.main.setup_basic_context')
+ def test_run_non_root(self, mock_setup, mock_parse, mock_is_root, mock_log_fatal):
+ mock_is_root.return_value = False
+ ctx = mock.Mock(process_name="crash_test")
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ main.run(ctx)
+
+ mock_setup.assert_called_once_with(ctx)
+ mock_parse.assert_called_once_with(ctx)
+ mock_is_root.assert_called_once_with()
+ mock_log_fatal.assert_called_once_with("{} can only be executed as user root!".format(ctx.process_name))
+
+ @mock.patch('crmsh.crash_test.main.split_brain')
+ @mock.patch('crmsh.crash_test.main.fence_node')
+ @mock.patch('crmsh.crash_test.main.kill_process')
+ @mock.patch('crmsh.crash_test.main.check.check')
+ @mock.patch('crmsh.crash_test.main.check.fix')
+ @mock.patch('os.makedirs')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.utils.is_root')
+ @mock.patch('crmsh.crash_test.main.parse_argument')
+ @mock.patch('crmsh.crash_test.main.setup_basic_context')
+ def test_run(self, mock_setup, mock_parse, mock_is_root, mock_exists, mock_mkdir,
+ mock_fix, mock_check, mock_kill, mock_fence, mock_sb):
+ mock_is_root.return_value = True
+ ctx = mock.Mock(var_dir="/var/lib/crash_test")
+ mock_exists.return_value = False
+
+ main.run(ctx)
+
+ mock_setup.assert_called_once_with(ctx)
+ mock_parse.assert_called_once_with(ctx)
+ mock_is_root.assert_called_once_with()
+ mock_exists.assert_called_once_with(ctx.var_dir)
+ mock_mkdir.assert_called_once_with(ctx.var_dir, exist_ok=True)
+ mock_check.assert_called_once_with(ctx)
+ mock_fix.assert_called_once_with(ctx)
+ mock_kill.assert_called_once_with(ctx)
+ mock_fence.assert_called_once_with(ctx)
+ mock_sb.assert_called_once_with(ctx)
+
+ @mock.patch('crmsh.crash_test.utils.json_dumps')
+ @mock.patch('crmsh.crash_test.main.check.check')
+ @mock.patch('crmsh.crash_test.main.check.fix')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.crash_test.utils.is_root')
+ @mock.patch('crmsh.crash_test.main.parse_argument')
+ @mock.patch('crmsh.crash_test.main.setup_basic_context')
+ def test_run_except(self, mock_setup, mock_parse, mock_is_root, mock_exists,
+ mock_fix, mock_check, mock_dumps):
+ mock_is_root.return_value = True
+ ctx = mock.Mock(var_dir="/var/lib/crash_test")
+ mock_exists.return_value = True
+ mock_check.side_effect = KeyboardInterrupt
+
+ with self.assertRaises(KeyboardInterrupt):
+ main.run(ctx)
+
+ mock_setup.assert_called_once_with(ctx)
+ mock_parse.assert_called_once_with(ctx)
+ mock_is_root.assert_called_once_with()
+ mock_exists.assert_called_once_with(ctx.var_dir)
+ mock_check.assert_called_once_with(ctx)
+ mock_fix.assert_called_once_with(ctx)
+ mock_dumps.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.task.TaskKill')
+ def test_kill_process_return_pacemaker_loop(self, mock_task_kill):
+ ctx = mock.Mock(pacemakerd=True, loop=True, sbd=None, corosync=None)
+ main.kill_process(ctx)
+ mock_task_kill.assert_not_called()
+
+ @mock.patch('crmsh.crash_test.task.TaskKill')
+ def test_kill_process_return(self, mock_task_kill):
+ ctx = mock.Mock(pacemakerd=False, sbd=False, corosync=False)
+ main.kill_process(ctx)
+ mock_task_kill.assert_not_called()
+
+ @mock.patch('crmsh.crash_test.task.TaskKill')
+ def test_kill_process(self, mock_task_kill):
+ mock_task_kill_inst = mock.Mock()
+ mock_task_kill.return_value = mock_task_kill_inst
+ mock_task_kill_inst.wait.side_effect = task.TaskError("error data")
+ ctx = mock.Mock(sbd=True)
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ main.kill_process(ctx)
+
+ mock_task_kill_inst.pre_check.assert_called_once_with()
+ mock_task_kill_inst.print_header.assert_called_once_with()
+ mock_task_kill_inst.enable_report.assert_called_once_with()
+ mock_task_kill_inst.run.assert_called_once_with()
+ mock_task_kill_inst.wait.assert_called_once_with()
+ mock_task_kill_inst.error.assert_called_once_with("error data")
+
+ def test_split_brain_return(self):
+ ctx = mock.Mock(sp_iptables=None)
+ main.split_brain(ctx)
+
+ @mock.patch('crmsh.crash_test.task.TaskSplitBrain')
+ def test_split_brain(self, mock_sp):
+ ctx = mock.Mock(sp_iptables=True, force=False)
+ mock_sp_inst = mock.Mock()
+ mock_sp.return_value = mock_sp_inst
+ mock_sp_inst.do_block.return_value.__enter__ = mock.Mock()
+ mock_sp_inst.do_block.return_value.__exit__ = mock.Mock()
+
+ main.split_brain(ctx)
+
+ mock_sp.assert_called_once_with(False)
+ mock_sp_inst.pre_check.assert_called_once_with()
+ mock_sp_inst.print_header.assert_called_once_with()
+ mock_sp_inst.do_block.assert_called_once_with()
+ mock_sp_inst.run.assert_called_once_with()
+ mock_sp_inst.wait.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.task.TaskSplitBrain')
+ def test_split_brain_exception(self, mock_sp):
+ ctx = mock.Mock(sp_iptables=True)
+ mock_sp_inst = mock.Mock()
+ mock_sp.return_value = mock_sp_inst
+ mock_sp_inst.pre_check.side_effect = task.TaskError("error data")
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ main.split_brain(ctx)
+
+ mock_sp_inst.error.assert_called_once_with("error data")
+
+ def test_fence_node_return(self):
+ ctx = mock.Mock(fence_node=None)
+ main.fence_node(ctx)
+
+ @mock.patch('crmsh.crash_test.task.TaskFence')
+ def test_fence_node(self, mock_task_fence):
+ mock_task_fence_inst = mock.Mock()
+ mock_task_fence.return_value = mock_task_fence_inst
+ mock_task_fence_inst.wait.side_effect = task.TaskError("error data")
+ ctx = mock.Mock(fence_node=True)
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ main.fence_node(ctx)
+
+ mock_task_fence_inst.pre_check.assert_called_once_with()
+ mock_task_fence_inst.print_header.assert_called_once_with()
+ mock_task_fence_inst.run.assert_called_once_with()
+ mock_task_fence_inst.wait.assert_called_once_with()
+ mock_task_fence_inst.error.assert_called_once_with("error data")
+
+ def test_MyArgParseFormatter(self):
+ main.MyArgParseFormatter("test")
diff --git a/test/unittests/test_crashtest_task.py b/test/unittests/test_crashtest_task.py
new file mode 100644
index 0000000..3b4c092
--- /dev/null
+++ b/test/unittests/test_crashtest_task.py
@@ -0,0 +1,777 @@
+import os
+import sys
+
+from unittest import TestCase
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+from datetime import datetime
+
+from crmsh import utils as crmshutils
+from crmsh.crash_test import utils, main, config, task
+
+
+class TestTaskKill(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def setUp(self, mock_msg_info):
+ """
+ Test setUp.
+ """
+ ctx = mock.Mock(current_case="sbd", loop=False)
+ self.task_kill_inst = task.TaskKill(ctx)
+ ctx2 = mock.Mock(current_case="sbd", loop=True)
+ self.task_kill_inst_loop = task.TaskKill(ctx2)
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('os.path.isdir')
+ def test_enable_report_error(self, mock_isdir):
+ mock_isdir.return_value = False
+ main.ctx = mock.Mock(report_path="/path")
+ with self.assertRaises(task.TaskError) as error:
+ self.task_kill_inst.enable_report()
+ self.assertEqual("/path is not a directory", str(error.exception))
+ mock_isdir.assert_called_once_with("/path")
+
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ @mock.patch('crmsh.crash_test.utils.now')
+ @mock.patch('os.path.isdir')
+ def test_enable_report_looping(self, mock_isdir, mock_now, mock_this_node):
+ main.ctx = mock.Mock(report_path="/path", process_name="cpc")
+ mock_now.return_value = "20210119-12345"
+ mock_this_node.return_value = "node1"
+ self.task_kill_inst_loop.enable_report()
+
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ @mock.patch('crmsh.crash_test.utils.now')
+ @mock.patch('os.path.isdir')
+ def test_enable_report(self, mock_isdir, mock_now, mock_this_node):
+ main.ctx = mock.Mock(report_path="/path", process_name="cpc")
+ mock_now.return_value = "20210119-12345"
+ mock_this_node.return_value = "node1"
+ self.task_kill_inst.enable_report()
+
+ def test_header(self):
+ expected_res = """==============================================
+Testcase: Force kill sbd
+Looping Kill: False
+Expected State: a) sbd process restarted
+ b) Or, this node fenced.
+"""
+ res = self.task_kill_inst.header()
+ self.assertEqual(res, expected_res)
+
+ @mock.patch('crmsh.crash_test.utils.json_dumps')
+ def test_to_json(self, mock_dumps):
+ self.task_kill_inst.build_base_result = mock.Mock()
+ self.task_kill_inst.result = {}
+ self.task_kill_inst.prev_task_list = []
+ self.task_kill_inst.to_json()
+ self.task_kill_inst.build_base_result.assert_called_once_with()
+ mock_dumps.assert_called_once_with()
+
+ def test_to_report_return(self):
+ self.task_kill_inst.report = False
+ self.task_kill_inst.to_report()
+
+ @mock.patch('os.fsync')
+ @mock.patch('builtins.open', create=True)
+ @mock.patch('crmsh.crash_test.task.TaskKill.header')
+ def test_to_report(self, mock_header, mock_open_file, mock_fsync):
+ mock_header.return_value = "#### header"
+ self.task_kill_inst.report = True
+ self.task_kill_inst.messages = [["info", "data", "2021"]]
+ self.task_kill_inst.explain = "explain"
+ self.task_kill_inst.report_file = "report_file1"
+ file_handle = mock_open_file.return_value.__enter__.return_value
+
+ self.task_kill_inst.to_report()
+
+ file_handle.write.assert_has_calls([
+ mock.call("#### header"),
+ mock.call("\nLog:\n"),
+ mock.call("2021 INFO:data\n"),
+ mock.call("\nTestcase Explained:\n"),
+ mock.call("explain\n")
+ ])
+
+ @mock.patch('crmsh.crash_test.utils.get_process_status')
+ @mock.patch('crmsh.crash_test.task.Task.task_pre_check')
+ def test_pre_check(self, mock_pre_check, mock_status):
+ mock_status.return_value = (False, 100)
+ with self.assertRaises(task.TaskError) as err:
+ self.task_kill_inst.pre_check()
+ self.assertEqual("Process sbd is not running!", str(err.exception))
+ mock_pre_check.assert_called_once_with()
+ mock_status.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.crash_test.task.TaskKill.process_monitor')
+ @mock.patch('crmsh.crash_test.task.Task.fence_action_monitor')
+ @mock.patch('threading.Thread')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.crash_test.utils.get_process_status')
+ def test_run(self, mock_status, mock_info, mock_run, mock_thread, mock_fence_monitor, mock_process_monitor):
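+ # simulate run() polling for the process: the first status check misses sbd,
+ # the second finds it running with pid 100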
+ mock_status.side_effect = [(False, -1), (True, 100)]
+ mock_thread_fence_inst = mock.Mock()
+ mock_thread_restart_inst = mock.Mock()
+ mock_thread.side_effect = [mock_thread_fence_inst, mock_thread_restart_inst]
+
+ self.task_kill_inst.run()
+
+ mock_status.assert_has_calls([mock.call("sbd"), mock.call("sbd")])
+ mock_info.assert_has_calls([
+ mock.call('Process sbd(100) is running...'),
+ mock.call('Trying to run "killall -9 sbd"')
+ ])
+ mock_run.assert_called_once_with("killall -9 sbd")
+ mock_thread.assert_has_calls([
+ mock.call(target=mock_fence_monitor),
+ mock.call(target=mock_process_monitor),
+ ])
+ mock_thread_fence_inst.start.assert_called_once_with()
+ mock_thread_restart_inst.start.assert_called_once_with()
+
+ def test_wait_exception(self):
+ self.task_kill_inst.fence_start_event = mock.Mock()
+ self.task_kill_inst.restart_happen_event = mock.Mock()
+ self.task_kill_inst.thread_stop_event = mock.Mock()
+ self.task_kill_inst.fence_start_event.wait.return_value = True
+ self.task_kill_inst.restart_happen_event.is_set.return_value = False
+
+ with self.assertRaises(task.TaskError) as err:
+ self.task_kill_inst.wait()
+ self.assertEqual("Process sbd is not restarted!", str(err.exception))
+
+ def test_wait(self):
+ self.task_kill_inst.fence_start_event = mock.Mock()
+ self.task_kill_inst.restart_happen_event = mock.Mock()
+ self.task_kill_inst.thread_stop_event = mock.Mock()
+ self.task_kill_inst.fence_start_event.wait.return_value = True
+ self.task_kill_inst.restart_happen_event.is_set.return_value = True
+
+ self.task_kill_inst.wait()
+
+ self.task_kill_inst.thread_stop_event.set.assert_called_once_with()
+
+ @mock.patch('time.sleep')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.crash_test.utils.get_process_status')
+ def test_process_monitor(self, mock_status, mock_info, mock_sleep):
+ self.task_kill_inst.thread_stop_event = mock.Mock()
+ self.task_kill_inst.thread_stop_event.is_set.side_effect = [False, False]
+ self.task_kill_inst.restart_happen_event = mock.Mock()
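+ # simulate the monitor polling until the killed process reappears with pid 100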
+ mock_status.side_effect = [(False, -1), (True, 100)]
+
+ self.task_kill_inst.process_monitor()
+
+ self.task_kill_inst.thread_stop_event.is_set.assert_has_calls([
+ mock.call(),
+ mock.call()
+ ])
+ mock_status.assert_has_calls([
+ mock.call("sbd"),
+ mock.call("sbd")
+ ])
+ mock_info.assert_called_once_with("Process sbd(100) is restarted!")
+ self.task_kill_inst.restart_happen_event.set.assert_called_once_with()
+ mock_sleep.assert_called_once_with(1)
+
+
+class TestTaskCheck(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.crash_test.utils.now')
+ def setUp(self, mock_now, mock_msg_info):
+ """
+ Test setUp.
+ """
+ mock_now.return_value = "2019/07/10 01:15:15"
+ main.ctx = mock.Mock(task_list=[{"process_name": "xin", "age": 38}])
+ self.task_check_inst = task.TaskCheck("task check job1", quiet=False)
+ self.task_check_inst_quiet = task.TaskCheck("task check job1", quiet=True)
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.MyLoggingFormatter')
+ @mock.patch('crmsh.crash_test.utils.get_handler')
+ @mock.patch('crmsh.crash_test.utils.manage_handler')
+ def test_to_stdout(self, mock_manage_handler, mock_get_handler, mock_myformatter):
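+ # to_stdout() is expected to mute the file handler and re-format the stream
+ # handler so the collected messages land on the console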
+ mock_manage_handler.return_value.__enter__ = mock.Mock()
+ mock_manage_handler.return_value.__exit__ = mock.Mock()
+
+ task.logger = mock.Mock()
+ task.logger.info = mock.Mock()
+ task.logger.log = mock.Mock()
+
+ get_handler_inst1 = mock.Mock()
+ get_handler_inst1.setFormatter = mock.Mock()
+ get_handler_inst2 = mock.Mock()
+ get_handler_inst2.setFormatter = mock.Mock()
+ mock_get_handler.side_effect = [get_handler_inst1, get_handler_inst2]
+
+ myformatter_inst1 = mock.Mock()
+ myformatter_inst2 = mock.Mock()
+ mock_myformatter.side_effect = [myformatter_inst1, myformatter_inst2]
+
+ self.task_check_inst.messages = [("info", "info message"), ("warn", "warn message")]
+ utils.CGREEN = ""
+ utils.CEND = ""
+ utils.CRED = ""
+
+ self.task_check_inst.to_stdout()
+
+ mock_manage_handler.assert_called_once_with("file", keep=False)
+ mock_get_handler.assert_has_calls([
+ mock.call(task.logger, "stream"),
+ mock.call(task.logger, "stream")
+ ])
+ get_handler_inst1.setFormatter.assert_called_once_with(myformatter_inst1)
+ get_handler_inst2.setFormatter.assert_called_once_with(myformatter_inst2)
+ mock_myformatter.assert_has_calls([
+ mock.call(flush=False),
+ mock.call()
+ ])
+ task.logger.info.assert_called_once_with('task check job1 [Pass]', extra={'timestamp': '[2019/07/10 01:15:15]'})
+ task.logger.log.assert_has_calls([
+ mock.call(20, 'info message', extra={'timestamp': ' '}),
+ mock.call(30, 'warn message', extra={'timestamp': ' '})
+ ])
+
+ @mock.patch('crmsh.crash_test.utils.json_dumps')
+ def test_to_json(self, mock_dumps):
+ self.task_check_inst.build_base_result = mock.Mock()
+ self.task_check_inst.result = {}
+ self.task_check_inst.to_json()
+ self.task_check_inst.build_base_result.assert_called_once_with()
+ mock_dumps.assert_called_once_with()
+
+ def test_print_result(self):
+ self.task_check_inst.to_stdout = mock.Mock()
+ self.task_check_inst.to_json = mock.Mock()
+ self.task_check_inst.print_result()
+ self.task_check_inst.to_stdout.assert_called_once_with()
+ self.task_check_inst.to_json.assert_called_once_with()
+
+ def test_print_result_quiet(self):
+ self.task_check_inst.quiet = True
+ self.task_check_inst.to_stdout = mock.Mock()
+ self.task_check_inst.print_result()
+ self.task_check_inst.to_stdout.assert_not_called()
+
+ def test_run(self):
+ self.task_check_inst.print_result = mock.Mock()
+ with self.task_check_inst.run():
+ pass
+ self.task_check_inst.print_result.assert_called_once_with()
+
+
+class TestTaskSplitBrain(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def setUp(self, mock_msg_info):
+ """
+ Test setUp.
+ """
+ self.task_sp_inst = task.TaskSplitBrain()
+ self.task_sp_inst.fence_action = "reboot"
+ self.task_sp_inst.fence_timeout = 60
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_header(self):
+ expected_res = """==============================================
+Testcase: Simulate split brain by blocking traffic between cluster nodes
+Expected Result: One of nodes get fenced
+Fence action: reboot
+Fence timeout: 60
+"""
+ res = self.task_sp_inst.header()
+ self.assertEqual(res, expected_res)
+
+ @mock.patch('crmsh.crash_test.utils.json_dumps')
+ @mock.patch('crmsh.crash_test.task.Task.build_base_result')
+ def test_to_json(self, mock_result, mock_json):
+ self.task_sp_inst.result = {}
+ self.task_sp_inst.to_json()
+ mock_result.assert_called_once_with()
+ mock_json.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.task_pre_check')
+ def test_pre_check_no_cmd(self, mock_pre_check, mock_run):
+ mock_run.return_value = (1, None, "error")
+ with self.assertRaises(task.TaskError) as err:
+ self.task_sp_inst.pre_check()
+ self.assertEqual("error", str(err.exception))
+ mock_run.assert_called_once_with("which iptables")
+ mock_pre_check.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.online_nodes')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.task_pre_check')
+ def test_pre_check_error(self, mock_pre_check, mock_run, mock_online_nodes):
+ mock_run.return_value = (0, None, None)
+ mock_online_nodes.return_value = ["node1"]
+ with self.assertRaises(task.TaskError) as err:
+ self.task_sp_inst.pre_check()
+ self.assertEqual("At least two nodes online!", str(err.exception))
+ mock_run.assert_called_once_with("which iptables")
+ mock_online_nodes.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.crmshutils.get_iplist_from_name')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.crash_test.utils.peer_node_list')
+ def test_do_block_iptables(self, mock_peer_list, mock_info, mock_get_iplist, mock_run):
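+ # each peer node resolves to two ring addresses; an iptables block rule
+ # should be inserted for all four of them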
+ mock_peer_list.return_value = ["node1", "node2"]
+ mock_get_iplist.side_effect = [["10.10.10.1", "20.20.20.1"], ["10.10.10.2", "20.20.20.2"]]
+ self.task_sp_inst.do_block_iptables()
+ mock_peer_list.assert_called_once_with()
+ mock_info.assert_has_calls([
+ mock.call("Trying to temporarily block node1 communication ip"),
+ mock.call("Trying to temporarily block node2 communication ip")
+ ])
+ mock_get_iplist.assert_has_calls([
+ mock.call("node1"),
+ mock.call("node2")
+ ])
+ mock_run.assert_has_calls([
+ mock.call(config.BLOCK_IP.format(action='I', peer_ip="10.10.10.1")),
+ mock.call(config.BLOCK_IP.format(action='I', peer_ip="20.20.20.1")),
+ mock.call(config.BLOCK_IP.format(action='I', peer_ip="10.10.10.2")),
+ mock.call(config.BLOCK_IP.format(action='I', peer_ip="20.20.20.2"))
+ ])
+
+ @mock.patch('crmsh.crash_test.task.TaskSplitBrain.un_block_iptables')
+ def test_un_block(self, mock_unblock_iptables):
+ self.task_sp_inst.un_block()
+ mock_unblock_iptables.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.crmshutils.get_iplist_from_name')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ def test_un_block_iptables(self, mock_info, mock_get_iplist, mock_run):
+ mock_get_iplist.side_effect = [["10.10.10.1", "20.20.20.1"], ["10.10.10.2", "20.20.20.2"]]
+ self.task_sp_inst.peer_nodelist = ["node1", "node2"]
+ self.task_sp_inst.un_block_iptables()
+ mock_info.assert_has_calls([
+ mock.call("Trying to recover node1 communication ip"),
+ mock.call("Trying to recover node2 communication ip")
+ ])
+ mock_get_iplist.assert_has_calls([
+ mock.call("node1"),
+ mock.call("node2")
+ ])
+ mock_run.assert_has_calls([
+ mock.call(config.BLOCK_IP.format(action='D', peer_ip="10.10.10.1")),
+ mock.call(config.BLOCK_IP.format(action='D', peer_ip="20.20.20.1")),
+ mock.call(config.BLOCK_IP.format(action='D', peer_ip="10.10.10.2")),
+ mock.call(config.BLOCK_IP.format(action='D', peer_ip="20.20.20.2"))
+ ])
+
+ @mock.patch('crmsh.crash_test.task.Task.fence_action_monitor')
+ @mock.patch('threading.Thread')
+ def test_run(self, mock_thread, mock_monitor):
+ mock_thread_inst = mock.Mock()
+ mock_thread.return_value = mock_thread_inst
+ self.task_sp_inst.run()
+ mock_thread.assert_called_once_with(target=mock_monitor)
+ mock_thread_inst.start.assert_called_once_with()
+
+ def test_wait(self):
+ self.task_sp_inst.fence_finish_event = mock.Mock()
+ self.task_sp_inst.fence_finish_event.wait.return_value = False
+ self.task_sp_inst.thread_stop_event = mock.Mock()
+ self.task_sp_inst.wait()
+ self.task_sp_inst.fence_finish_event.wait.assert_called_once_with(60)
+ self.task_sp_inst.thread_stop_event.set.assert_called_once_with()
+
+
+class TestFence(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def setUp(self, mock_msg_info):
+ """
+ Test setUp.
+ """
+ ctx = mock.Mock(fence_node="node1", yes=False)
+ self.task_fence_inst = task.TaskFence(ctx)
+ self.task_fence_inst.fence_action = "reboot"
+ self.task_fence_inst.fence_timeout = 60
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_header(self):
+ expected_res = """==============================================
+Testcase: Fence node node1
+Fence action: reboot
+Fence timeout: 60
+"""
+ res = self.task_fence_inst.header()
+ self.assertEqual(res, expected_res)
+
+ @mock.patch('crmsh.crash_test.utils.json_dumps')
+ @mock.patch('crmsh.crash_test.task.Task.build_base_result')
+ def test_to_json(self, mock_result, mock_json):
+ self.task_fence_inst.result = {}
+ self.task_fence_inst.to_json()
+ mock_result.assert_called_once_with()
+ mock_json.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.task_pre_check')
+ def test_pre_check_no_cmd(self, mock_pre_check, mock_run):
+ mock_run.return_value = (1, None, "error")
+ with self.assertRaises(task.TaskError) as err:
+ self.task_fence_inst.pre_check()
+ self.assertEqual("error", str(err.exception))
+ mock_run.assert_called_once_with("which crm_node")
+ mock_pre_check.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.check_node_status')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.task_pre_check')
+ def test_pre_check_error(self, mock_pre_check, mock_run, mock_node_status):
+ mock_run.side_effect = [(0, None, None), (0, None, None), (0, None, None)]
+ mock_node_status.return_value = False
+ with self.assertRaises(task.TaskError) as err:
+ self.task_fence_inst.pre_check()
+ self.assertEqual("Node \"node1\" not in cluster!", str(err.exception))
+ mock_run.assert_has_calls([
+ mock.call("which crm_node"),
+ mock.call("which stonith_admin"),
+ mock.call("which crm_attribute")
+ ])
+ mock_node_status.assert_called_once_with("node1", "member")
+
+ @mock.patch('crmsh.crash_test.task.Task.fence_action_monitor')
+ @mock.patch('threading.Thread')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ def test_run(self, mock_info, mock_run, mock_thread, mock_monitor):
+ mock_thread_inst = mock.Mock()
+ mock_thread.return_value = mock_thread_inst
+ self.task_fence_inst.run()
+ mock_info.assert_called_once_with("Trying to fence node \"node1\"")
+ mock_run.assert_called_once_with("crm_attribute -t status -N 'node1' -n terminate -v true")
+ mock_thread.assert_called_once_with(target=mock_monitor)
+ mock_thread_inst.start.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ def test_wait_this_node(self, mock_this_node, mock_info):
+ mock_this_node.return_value = "node1"
+ self.task_fence_inst.fence_finish_event = mock.Mock()
+ self.task_fence_inst.thread_stop_event = mock.Mock()
+ self.task_fence_inst.fence_finish_event.wait.return_value = True
+
+ self.task_fence_inst.wait()
+
+ mock_this_node.assert_called_once_with()
+ mock_info.assert_called_once_with("Waiting 60s for self reboot...")
+ self.task_fence_inst.fence_finish_event.wait.assert_called_once_with(60)
+
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ def test_wait(self, mock_this_node, mock_info):
+ mock_this_node.return_value = "node2"
+ self.task_fence_inst.fence_finish_event = mock.Mock()
+ self.task_fence_inst.thread_stop_event = mock.Mock()
+ self.task_fence_inst.fence_finish_event.wait.return_value = None
+
+ with self.assertRaises(task.TaskError) as err:
+ self.task_fence_inst.wait()
+ self.assertEqual("Target fence node \"node1\" still alive", str(err.exception))
+
+ mock_this_node.assert_called_once_with()
+ mock_info.assert_called_once_with("Waiting 60s for node \"node1\" reboot...")
+ self.task_fence_inst.fence_finish_event.wait.assert_called_once_with(60)
+
+
+class TestTask(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ @mock.patch('crmsh.crash_test.utils.now')
+ def setUp(self, mock_now, mock_info):
+ """
+ Test setUp.
+ """
+ mock_now.return_value = "2019/07/10 01:15:15"
+ main.ctx = mock.Mock(task_list={"process_name": "xin", "age": 38})
+ self.task_inst = task.Task("task description", flush=True)
+ mock_now.assert_called_once_with()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_header(self):
+ self.task_inst.header()
+
+ def test_to_report(self):
+ self.task_inst.to_report()
+
+ def test_to_json(self):
+ self.task_inst.to_json()
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_task_pre_check_exception(self, mock_active):
+ mock_active.return_value = False
+ with self.assertRaises(task.TaskError) as err:
+ self.task_inst.task_pre_check()
+ self.assertEqual("Cluster not running!", str(err.exception))
+ mock_active.assert_called_once_with("pacemaker.service")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_task_pre_check_exception_no_fence(self, mock_active):
+ mock_active.return_value = True
+ self.task_inst.get_fence_info = mock.Mock()
+ self.task_inst.fence_enabled = False
+ with self.assertRaises(task.TaskError) as err:
+ self.task_inst.task_pre_check()
+ self.assertEqual("Require stonith enabled", str(err.exception))
+ mock_active.assert_called_once_with("pacemaker.service")
+ self.task_inst.get_fence_info.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.FenceInfo')
+ def test_get_fence_info(self, mock_fence_info):
+ mock_fence_info_inst = mock.Mock()
+ mock_fence_info.return_value = mock_fence_info_inst
+ self.task_inst.get_fence_info()
+
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def test_info(self, mock_info):
+ self.task_inst.msg_append = mock.Mock()
+ self.task_inst.info("info message")
+ self.task_inst.msg_append.assert_called_once_with("info", "info message")
+ mock_info.assert_called_once_with("info message", to_stdout=True)
+
+ @mock.patch('crmsh.crash_test.utils.msg_warn')
+ def test_warn(self, mock_warn):
+ self.task_inst.msg_append = mock.Mock()
+ self.task_inst.warn("warn message")
+ self.task_inst.msg_append.assert_called_once_with("warn", "warn message")
+ mock_warn.assert_called_once_with("warn message", to_stdout=True)
+
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ def test_error(self, mock_error):
+ self.task_inst.msg_append = mock.Mock()
+ self.task_inst.error("error message")
+ self.task_inst.msg_append.assert_called_once_with("error", "error message")
+ mock_error.assert_called_once_with("error message", to_stdout=True)
+
+ @mock.patch('crmsh.crash_test.utils.now')
+ def test_msg_append(self, mock_now):
+ self.task_inst.to_json = mock.Mock()
+ self.task_inst.to_report = mock.Mock()
+ self.task_inst.msg_append("error", "warn message")
+ mock_now.assert_called_once_with()
+ self.task_inst.to_json.assert_called_once_with()
+ self.task_inst.to_report.assert_called_once_with()
+
+ def test_build_base_result(self):
+ self.task_inst.build_base_result()
+ expected_result = {
+ "Timestamp": self.task_inst.timestamp,
+ "Description": self.task_inst.description,
+ "Messages": []
+ }
+ self.assertDictEqual(expected_result, self.task_inst.result)
+
+ @mock.patch('crmsh.crash_test.utils.warning_ask')
+ def test_print_header(self, mock_ask):
+ self.task_inst.header = mock.Mock()
+ self.task_inst.info = mock.Mock()
+ mock_ask.return_value = False
+
+ with self.assertRaises(crmshutils.TerminateSubCommand):
+ self.task_inst.print_header()
+
+ self.task_inst.header.assert_called_once_with()
+ mock_ask.assert_called_once_with(task.Task.REBOOT_WARNING)
+ self.task_inst.info.assert_called_once_with("Testcase cancelled")
+
+ @mock.patch('crmsh.crash_test.utils.str_to_datetime')
+ @mock.patch('time.sleep')
+ @mock.patch('crmsh.crash_test.task.Task.info')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_fence_action_monitor(self, mock_run, mock_info, mock_sleep, mock_datetime):
+ self.task_inst.thread_stop_event = mock.Mock()
+ self.task_inst.thread_stop_event.is_set.side_effect = [False, False, False, False]
+ self.task_inst.fence_start_event = mock.Mock()
+ self.task_inst.fence_finish_event = mock.Mock()
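+ # the monitor first sees a pending fence action via crm_mon, then confirms
+ # completion through the fence history; the parsed timestamps verify the
+ # fence happened after the test started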
+ output = "Pending Fencing Actions:\n * reboot of 15sp2-2 pending: client=pacemaker-controld.2430, origin=15sp2-1"
+ output2 = "Node 15sp2-2 last fenced at: Tue Jan 19 16:08:37 2021"
+ mock_run.side_effect = [(1, None, None), (0, output, None), (1, None, None), (0, output2, None)]
+ self.task_inst.timestamp = "2021/01/19 16:08:24"
+ mock_datetime.side_effect = [
+ datetime.strptime(self.task_inst.timestamp, '%Y/%m/%d %H:%M:%S'),
+ datetime.strptime("Tue Jan 19 16:08:37 2021", '%a %b %d %H:%M:%S %Y')
+ ]
+
+ self.task_inst.fence_action_monitor()
+
+ self.task_inst.thread_stop_event.is_set.assert_has_calls([
+ mock.call(),
+ mock.call(),
+ mock.call(),
+ mock.call()
+ ])
+ mock_run.assert_has_calls([
+ mock.call("crm_mon -1|grep -A1 \"Fencing Actions:\""),
+ mock.call("crm_mon -1|grep -A1 \"Fencing Actions:\""),
+ mock.call(config.FENCE_HISTORY.format(node="15sp2-2")),
+ mock.call(config.FENCE_HISTORY.format(node="15sp2-2"))
+ ])
+ mock_info.assert_has_calls([
+ mock.call("Node \"15sp2-2\" will be fenced by \"15sp2-1\"!"),
+ mock.call("Node \"15sp2-2\" was successfully fenced by \"15sp2-1\"")
+ ])
+ self.task_inst.fence_start_event.set.assert_called_once_with()
+ self.task_inst.fence_finish_event.set.assert_called_once_with()
+
+class TestFixSBD(TestCase):
+ """
+ Class to test TaskFixSBD of task.py
+ All methods are covered in test_crash_test.py except verify(), which is tested here.
+ """
+
+ @mock.patch('builtins.open')
+ @mock.patch('os.path.isfile')
+ @mock.patch('tempfile.mkstemp')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def setUp(self, mock_msg_info, mock_mkstemp, mock_isfile, mock_open):
+ """
+ Test setUp.
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ bak = "/tmp/tmpmby3ty9g"
+ edit = "/tmp/tmpnic4t30s"
+ mock_isfile.return_value = True
+ mock_open.return_value = mock.mock_open(read_data="SBD_DEVICE={}".
+ format(dev)).return_value
+ mock_mkstemp.side_effect = [(1, bak), (2, edit)]
+
+ self.task_fixsbd = task.TaskFixSBD(dev, force=False)
+ mock_msg_info.assert_called_once_with('Replace SBD_DEVICE with candidate {}'.
+ format(dev), to_stdout=False)
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+ pass
+
+ @mock.patch('os.fsync')
+ @mock.patch('builtins.open')
+ @mock.patch('os.path.isfile')
+ @mock.patch('crmsh.crash_test.utils.msg_info')
+ def test_verify_succeed(self, mock_msg_info, mock_isfile, mock_open, mock_fsync):
+ """
+ Test verify successful.
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ mock_isfile.return_value = True
+ mock_open.return_value = mock.mock_open(read_data="SBD_DEVICE={}".
+ format(dev)).return_value
+ self.task_fixsbd.prev_task_list = []
+
+ self.task_fixsbd.verify()
+ mock_isfile.assert_called_once_with(config.SBD_CONF)
+ mock_msg_info.assert_called_once_with('SBD DEVICE change succeed',
+ to_stdout=True)
+ mock_fsync.assert_called()
+
+ @mock.patch('builtins.open')
+ @mock.patch('os.path.isfile')
+ def test_verify_fail(self, mock_isfile, mock_open):
+ """
+ Test verify failed.
+ """
+ dev = "/dev/disk/by-id/scsi-SATA_ST2000LM007-1R81_WDZ5J42A"
+ dev_cur = "/dev/disk/by-id/scsi-SATA_ST2000LM007-no_change"
+ mock_isfile.return_value = True
+ mock_open.return_value = mock.mock_open(read_data="SBD_DEVICE={}".
+ format(dev_cur)).return_value
+ self.task_fixsbd.prev_task_list = []
+
+ with self.assertRaises(task.TaskError) as err:
+ self.task_fixsbd.verify()
+ mock_isfile.assert_called_once_with(config.SBD_CONF)
+ self.assertEqual("Fail to replace SBD device {} in {}!".
+ format(dev, config.SBD_CONF), str(err.exception))
diff --git a/test/unittests/test_crashtest_utils.py b/test/unittests/test_crashtest_utils.py
new file mode 100644
index 0000000..f8a579b
--- /dev/null
+++ b/test/unittests/test_crashtest_utils.py
@@ -0,0 +1,540 @@
+import os
+import sys
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
+
+from unittest import TestCase
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+import logging
+
+from crmsh.crash_test import utils, main, config
+
+
+class TestMyLoggingFormatter(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.formatter_inst = utils.MyLoggingFormatter()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+
+class TestFenceInfo(TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.fence_info_inst = utils.FenceInfo()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_enabled_false(self, mock_get_property):
+ mock_get_property.return_value = None
+ res = self.fence_info_inst.fence_enabled
+ self.assertEqual(res, False)
+ mock_get_property.assert_called_once_with("stonith-enabled")
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_enabled_true(self, mock_get_property):
+ mock_get_property.return_value = "True"
+ res = self.fence_info_inst.fence_enabled
+ self.assertEqual(res, True)
+ mock_get_property.assert_called_once_with("stonith-enabled")
+
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_action_none(self, mock_get_property, mock_error):
+ mock_get_property.return_value = None
+ res = self.fence_info_inst.fence_action
+ self.assertEqual(res, None)
+ mock_get_property.assert_called_once_with("stonith-action")
+ mock_error.assert_called_once_with('Cluster property "stonith-action" should be reboot|off|poweroff')
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_action(self, mock_get_property):
+ mock_get_property.return_value = "reboot"
+ res = self.fence_info_inst.fence_action
+ self.assertEqual(res, "reboot")
+ mock_get_property.assert_called_once_with("stonith-action")
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_timeout(self, mock_get_property):
+ mock_get_property.return_value = "60s"
+ res = self.fence_info_inst.fence_timeout
+ self.assertEqual(res, "60")
+ mock_get_property.assert_called_once_with("stonith-timeout")
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.get_property')
+ def test_fence_timeout_default(self, mock_get_property):
+ mock_get_property.return_value = None
+ res = self.fence_info_inst.fence_timeout
+ self.assertEqual(res, config.FENCE_TIMEOUT)
+ mock_get_property.assert_called_once_with("stonith-timeout")
+
+
+class TestUtils(TestCase):
+ '''
+ Unit tests for crash_test/utils.py
+ '''
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.crash_test.utils.datetime')
+ def test_now(self, mock_datetime):
+ mock_now = mock.Mock()
+ mock_datetime.now.return_value = mock_now
+ mock_now.strftime.return_value = "2019/07/05 14:44:55"
+
+ result = utils.now()
+
+ self.assertEqual(result, "2019/07/05 14:44:55")
+ mock_datetime.now.assert_called_once_with()
+ mock_now.strftime.assert_called_once_with("%Y/%m/%d %H:%M:%S")
+
+ @mock.patch('crmsh.crash_test.utils.get_handler')
+ def test_manage_handler(self, mock_get_handler):
+ mock_get_handler.return_value = "handler"
+ utils.logger = mock.Mock()
+ utils.logger.removeHandler = mock.Mock()
+ utils.logger.addHandler = mock.Mock()
+
+ with utils.manage_handler("type1", keep=False):
+ pass
+
+ mock_get_handler.assert_called_once_with(utils.logger, "type1")
+ utils.logger.removeHandler.assert_called_once_with("handler")
+ utils.logger.addHandler.assert_called_once_with("handler")
+
+ @mock.patch('crmsh.crash_test.utils.manage_handler')
+ def test_msg_raw(self, mock_handler):
+ utils.logger = mock.Mock()
+ utils.logger.log = mock.Mock()
+ utils.msg_raw("level1", "msg1")
+ mock_handler.assert_called_once_with("console", True)
+ utils.logger.log.assert_called_once_with("level1", "msg1")
+
+ @mock.patch('crmsh.crash_test.utils.msg_raw')
+ def test_msg_info(self, mock_raw):
+ utils.msg_info("msg1")
+ mock_raw.assert_called_once_with(logging.INFO, "msg1", True)
+
+ @mock.patch('crmsh.crash_test.utils.msg_raw')
+ def test_msg_warn(self, mock_raw):
+ utils.msg_warn("msg1")
+ mock_raw.assert_called_once_with(logging.WARNING, "msg1", True)
+
+ @mock.patch('crmsh.crash_test.utils.msg_raw')
+ def test_msg_error(self, mock_raw):
+ utils.msg_error("msg1")
+ mock_raw.assert_called_once_with(logging.ERROR, "msg1", True)
+
+ @mock.patch('os.fsync')
+ @mock.patch('json.dumps')
+ @mock.patch('builtins.open', create=True)
+ def test_json_dumps(self, mock_open_file, mock_dumps, mock_fsync):
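+ # wire a mock_open handle into builtins.open so write/flush/fsync can be asserted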
+ main.ctx = mock.Mock(jsonfile="file1", task_list={"process_name": "xin", "age": 38})
+ mock_open_write = mock.mock_open()
+ file_handle = mock_open_write.return_value.__enter__.return_value
+ mock_open_file.return_value = mock_open_write.return_value
+ mock_dumps.return_value = "data"
+
+ utils.json_dumps()
+
+ mock_open_file.assert_called_once_with("file1", "w")
+ mock_dumps.assert_called_once_with(main.ctx.task_list, indent=2)
+ file_handle.write.assert_called_once_with("data")
+ file_handle.flush.assert_called_once_with()
+ mock_fsync.assert_called_once_with(file_handle)
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.this_node')
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_this_node_false(self, mock_run, mock_error, mock_this_node):
+ mock_run.return_value = (1, None, "error data")
+ mock_this_node.return_value = "node1"
+
+ res = utils.this_node()
+ self.assertEqual(res, "node1")
+
+ mock_run.assert_called_once_with("crm_node --name")
+ mock_error.assert_called_once_with("error data")
+ mock_this_node.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_this_node(self, mock_run):
+ mock_run.return_value = (0, "data", None)
+ res = utils.this_node()
+ self.assertEqual(res, "data")
+ mock_run.assert_called_once_with("crm_node --name")
+
+ @mock.patch('crmsh.crash_test.utils.datetime')
+ def test_str_to_datetime(self, mock_datetime):
+ utils.str_to_datetime("Mon Nov 2 15:37:11 2020", "%a %b %d %H:%M:%S %Y")
+ mock_datetime.strptime.assert_called_once_with("Mon Nov 2 15:37:11 2020", "%a %b %d %H:%M:%S %Y")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_corosync_port_list(self, mock_run):
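+ # two totem interfaces are configured; their mcastport values should be
+ # collected in order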
+ output = """
+totem.interface.0.bindnetaddr (str) = 10.10.10.121
+totem.interface.0.mcastaddr (str) = 239.101.40.63
+totem.interface.0.mcastport (u16) = 5405
+totem.interface.0.ttl (u8) = 1
+totem.interface.1.bindnetaddr (str) = 20.20.20.121
+totem.interface.1.mcastaddr (str) = 239.6.213.31
+totem.interface.1.mcastport (u16) = 5407
+totem.interface.1.ttl (u8) = 1
+ """
+ mock_run.return_value = (0, output, None)
+ result = utils.corosync_port_list()
+ expected = ['5405', '5407']
+ self.assertListEqual(result, expected)
+ mock_run.assert_called_once_with("corosync-cmapctl totem.interface")
+
+ def test_get_handler(self):
+ mock_handler1 = mock.Mock(_name="test1_handler")
+ mock_handler2 = mock.Mock(_name="test2_handler")
+ mock_logger = mock.Mock(handlers=[mock_handler1, mock_handler2])
+ res = utils.get_handler(mock_logger, "test1_handler")
+ self.assertEqual(res, mock_handler1)
+
+ @mock.patch('os.getuid')
+ def test_is_root(self, mock_getuid):
+ mock_getuid.return_value = 0
+ self.assertEqual(utils.is_root(), True)
+ mock_getuid.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.to_ascii')
+ @mock.patch('os.path.basename')
+ @mock.patch('builtins.open')
+ @mock.patch('os.path.join')
+ @mock.patch('os.listdir')
+ def test_get_process_status_false(self, mock_listdir, mock_join, mock_open_file, mock_basename, mock_to_ascii):
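+ # only numeric /proc entries are scanned ('none' is skipped); each
+ # /proc/<pid>/cmdline is a NUL-separated byte string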
+ mock_listdir.return_value = ['1', '2', 'none']
+ mock_join.side_effect = ['/proc/1/cmdline', '/proc/2/cmdline']
+ mock_open_read_1 = mock.mock_open(read_data=b'/usr/sbin/cmd1\x00--user\x00')
+ mock_open_read_2 = mock.mock_open(read_data=b'/usr/sbin/cmd2\x00')
+ mock_open_file.side_effect = [
+ mock_open_read_1.return_value,
+ mock_open_read_2.return_value
+ ]
+ mock_to_ascii.side_effect = [
+ "/usr/sbin/cmd1\x00--user\x00",
+ "/usr/sbin/cmd2\x00"
+ ]
+ mock_basename.side_effect = ["cmd1", "cmd2"]
+
+ rc, pid = utils.get_process_status("sbd")
+ self.assertEqual(rc, False)
+ self.assertEqual(pid, -1)
+
+ mock_listdir.assert_called_once_with('/proc')
+ mock_join.assert_has_calls([
+ mock.call('/proc', '1', 'cmdline'),
+ mock.call('/proc', '2', 'cmdline')
+ ])
+ mock_open_file.assert_has_calls([
+ mock.call('/proc/1/cmdline', 'rb'),
+ mock.call('/proc/2/cmdline', 'rb')
+ ])
+ mock_to_ascii.assert_has_calls([
+ mock.call(b'/usr/sbin/cmd1\x00--user\x00'),
+ mock.call(b'/usr/sbin/cmd2\x00')
+ ])
+
+ @mock.patch('crmsh.crash_test.utils.crmshutils.to_ascii')
+ @mock.patch('os.path.basename')
+ @mock.patch('builtins.open')
+ @mock.patch('os.path.join')
+ @mock.patch('os.listdir')
+ def test_get_process_status(self, mock_listdir, mock_join, mock_open_file, mock_basename, mock_to_ascii):
+ mock_listdir.return_value = ['1', '2', 'none']
+ mock_join.side_effect = ['/proc/1/cmdline', '/proc/2/cmdline']
+ mock_open_read_1 = mock.mock_open(read_data=b'/usr/sbin/cmd1\x00--user\x00')
+ mock_open_read_2 = mock.mock_open(read_data=b'/usr/sbin/sbd\x00')
+ mock_open_file.side_effect = [
+ mock_open_read_1.return_value,
+ mock_open_read_2.return_value
+ ]
+ mock_to_ascii.side_effect = [
+ "/usr/sbin/cmd1\x00--user\x00",
+ "/usr/sbin/sbd\x00"
+ ]
+ mock_basename.side_effect = ["cmd1", "sbd"]
+
+ rc, pid = utils.get_process_status("sbd")
+ self.assertEqual(rc, True)
+ self.assertEqual(pid, 2)
+
+ mock_listdir.assert_called_once_with('/proc')
+ mock_join.assert_has_calls([
+ mock.call('/proc', '1', 'cmdline'),
+ mock.call('/proc', '2', 'cmdline')
+ ])
+ mock_open_file.assert_has_calls([
+ mock.call('/proc/1/cmdline', 'rb'),
+ mock.call('/proc/2/cmdline', 'rb')
+ ])
+ mock_to_ascii.assert_has_calls([
+ mock.call(b'/usr/sbin/cmd1\x00--user\x00'),
+ mock.call(b'/usr/sbin/sbd\x00')
+ ])
+
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_check_node_status_error_cmd(self, mock_run, mock_error):
+ mock_run.return_value = (1, None, "error")
+ res = utils.check_node_status("node1", "member")
+ self.assertEqual(res, False)
+ mock_run.assert_called_once_with("crm_node -l")
+ mock_error.assert_called_once_with("error")
+
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_check_node_status(self, mock_run, mock_error):
+ output = """
+1084783297 15sp2-1 member
+1084783193 15sp2-2 lost
+ """
+ mock_run.return_value = (0, output, None)
+
+ res = utils.check_node_status("15sp2-2", "member")
+ self.assertEqual(res, False)
+ res = utils.check_node_status("15sp2-1", "member")
+ self.assertEqual(res, True)
+
+ mock_run.assert_has_calls([
+ mock.call("crm_node -l"),
+ mock.call("crm_node -l")
+ ])
+ mock_error.assert_not_called()
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_online_nodes_empty(self, mock_run):
+ mock_run.return_value = (0, "data", None)
+ res = utils.online_nodes()
+ self.assertEqual(res, [])
+ mock_run.assert_called_once_with("crm_mon -1")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_online_nodes(self, mock_run):
+ output = """
+Node List:
+ * Online: [ 15sp2-1 15sp2-2 ]
+ """
+ mock_run.return_value = (0, output, None)
+ res = utils.online_nodes()
+ self.assertEqual(res, ["15sp2-1", "15sp2-2"])
+ mock_run.assert_called_once_with("crm_mon -1")
+
+ @mock.patch('crmsh.crash_test.utils.online_nodes')
+ def test_peer_node_list_empty(self, mock_online):
+ mock_online.return_value = None
+ res = utils.peer_node_list()
+ self.assertEqual(res, [])
+ mock_online.assert_called_once_with()
+
+ @mock.patch('crmsh.crash_test.utils.this_node')
+ @mock.patch('crmsh.crash_test.utils.online_nodes')
+ def test_peer_node_list(self, mock_online, mock_this_node):
+ mock_online.return_value = ["node1", "node2"]
+ mock_this_node.return_value = "node1"
+ res = utils.peer_node_list()
+ self.assertEqual(res, ["node2"])
+ mock_online.assert_called_once_with()
+
+ # Test is_valid_sbd():
+ @classmethod
+ @mock.patch('os.path.exists')
+ def test_is_valid_sbd_not_exist(cls, mock_os_path_exists):
+ """
+ Test device does not exist
+ """
+ dev = "/dev/disk/by-id/scsi-device1"
+ mock_os_path_exists.return_value = False
+
+ res = utils.is_valid_sbd(dev)
+ assert res is False
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('os.path.exists')
+ def test_is_valid_sbd_cmd_error(cls, mock_os_path_exists,
+ mock_sbd_check_header, mock_msg_err):
+ """
+ Test the sbd header check command failing with an error
+ """
+ dev = "/dev/disk/by-id/scsi-device1"
+ mock_os_path_exists.return_value = True
+ mock_sbd_check_header.return_value = (-1, None, "Unknown error!")
+ mock_msg_err.return_value = ""
+
+ res = utils.is_valid_sbd(dev)
+ mock_msg_err.assert_called_once_with("Unknown error!")
+ assert res is False
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.utils.msg_error')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('os.path.exists')
+ def test_is_valid_sbd_not_sbd(cls, mock_os_path_exists,
+ mock_sbd_check_header, mock_msg_err):
+ """
+ Test device is not an SBD device
+ """
+ dev = "/dev/disk/by-id/scsi-device1"
+ err_output = """
+==Dumping header on disk {}
+==Header on disk {} NOT dumped
+sbd failed; please check the logs.
+""".format(dev, dev)
+ mock_os_path_exists.return_value = True
+ mock_sbd_check_header.return_value = (1, "==Dumping header on disk {}".format(dev),
+ err_output)
+
+ res = utils.is_valid_sbd(dev)
+ assert res is False
+ mock_msg_err.assert_called_once_with(err_output)
+
+ @classmethod
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ @mock.patch('os.path.exists')
+ def test_is_valid_sbd_is_sbd(cls, mock_os_path_exists,
+ mock_sbd_check_header):
+ """
+ Test device is a valid SBD device
+ """
+ dev = "/dev/disk/by-id/scsi-device1"
+ std_output = """
+==Dumping header on disk {}
+Header version : 2.1
+UUID : f4c99362-6522-46fc-8ce4-7db60aff19bb
+Number of slots : 255
+Sector size : 512
+Timeout (watchdog) : 5
+Timeout (allocate) : 2
+Timeout (loop) : 1
+Timeout (msgwait) : 10
+==Header on disk {} is dumped
+""".format(dev, dev)
+ mock_os_path_exists.return_value = True
+ mock_sbd_check_header.return_value = (0, std_output, None)
+
+ res = utils.is_valid_sbd(dev)
+ assert res is True
+
+ # Test find_candidate_sbd() and _find_match_count()
+ @classmethod
+ @mock.patch('glob.glob')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.dirname')
+ def test_find_candidate_no_dev(cls, mock_os_path_dname, mock_os_path_bname,
+ mock_glob):
+ """
+ Test no suitable device
+ """
+ mock_os_path_dname.return_value = "/dev/disk/by-id"
+ mock_os_path_bname.return_value = "scsi-label_CN_devA"
+ mock_glob.return_value = []
+
+ res = utils.find_candidate_sbd("/not-exist-folder/not-exist-dev")
+ assert res == ""
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('glob.glob')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.dirname')
+ def test_find_candidate_no_can(cls, mock_os_path_dname, mock_os_path_bname,
+ mock_glob, mock_is_valid_sbd):
+ """
+ Test no valid candidate device
+ """
+ mock_os_path_dname.return_value = "/dev/disk/by-id"
+ mock_os_path_bname.return_value = "scsi-label_CN_devA"
+ mock_glob.return_value = ["/dev/disk/by-id/scsi-label_DE_devA",
+ "/dev/disk/by-id/scsi-label_DE_devB",
+ "/dev/disk/by-id/scsi-label_DE_devC",
+ "/dev/disk/by-id/scsi-label_DE_devD"]
+ mock_is_valid_sbd.side_effect = [False, False, False, False]
+
+ res = utils.find_candidate_sbd("/dev/disk/by-id/scsi-label_CN_devA")
+ assert res == ""
+
+ @classmethod
+ @mock.patch('crmsh.crash_test.utils.is_valid_sbd')
+ @mock.patch('glob.glob')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.dirname')
+ def test_find_candidate_has_multi(cls, mock_os_path_dname, mock_os_path_bname,
+ mock_glob, mock_is_valid_sbd):
+ """
+ Test has multiple valid candidate devices
+ """
+ mock_os_path_dname.return_value = "/dev/disk/by-id"
+ mock_os_path_bname.return_value = "scsi-label_CN_devA"
+ mock_glob.return_value = ["/dev/disk/by-id/scsi-label_DE_devA",
+ "/dev/disk/by-id/scsi-label_DE_devB",
+ "/dev/disk/by-id/scsi-label_CN_devC",
+ "/dev/disk/by-id/scsi-label_CN_devD",
+ "/dev/disk/by-id/scsi-mp_China_devE",
+ "/dev/disk/by-id/scsi-mp_China_devF"]
+ mock_is_valid_sbd.side_effect = [True, False, False, True, True, False]
+
+ res = utils.find_candidate_sbd("/dev/disk/by-id/scsi-label_CN_devA")
+ assert res == "/dev/disk/by-id/scsi-label_CN_devD"
diff --git a/test/unittests/test_gv.py b/test/unittests/test_gv.py
new file mode 100644
index 0000000..fda7272
--- /dev/null
+++ b/test/unittests/test_gv.py
@@ -0,0 +1,36 @@
+from __future__ import unicode_literals
+# Copyright (C) 2015 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+
+import re
+
+from crmsh import crm_gv
+from crmsh import cibconfig
+
+
+def test_digits_ident():
+ g = crm_gv.gv_types["dot"]()
+ cibconfig.set_graph_attrs(g, ".")
+
+ g.new_node("1a", top_node=True)
+ g.new_attr("1a", 'label', "1a")
+ g.new_node("a", top_node=True)
+ g.new_attr("a", 'label', "a")
+
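+ # dot requires quoting for node IDs that start with a digit, while plain
+ # identifiers stay bare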
+ expected = [
+ 'fontname="Helvetica";',
+ 'fontsize="11";',
+ 'compound="true";',
+ '"1a" [label="1a"];',
+ 'a [label="a"];',
+ ]
+ out = '\n'.join(g.repr()).replace('\t', '')
+
+ for line in re.match(
+ r'^digraph G {\n\n(?P<expected>.*)\n}$', out, re.M | re.S
+ ).group('expected').split('\n'):
+ assert line in expected
+ expected.remove(line)
+
+ assert len(expected) == 0
diff --git a/test/unittests/test_handles.py b/test/unittests/test_handles.py
new file mode 100644
index 0000000..54cd634
--- /dev/null
+++ b/test/unittests/test_handles.py
@@ -0,0 +1,166 @@
+from __future__ import unicode_literals
+# Copyright (C) 2015 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+
+from crmsh import handles
+
+
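+# handles implements a small mustache-style template language: {{key}} substitution,
+# {{#section}}...{{/section}} conditionals and iteration, and {{^section}} inverted
+# sections, as exercised by the tests below.
+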
+def test_basic():
+ t = """{{foo}}"""
+ assert "hello" == handles.parse(t, {'foo': 'hello'})
+ t = """{{foo:bar}}"""
+ assert "hello" == handles.parse(t, {'foo': {'bar': 'hello'}})
+ t = """{{wiz}}"""
+ assert "" == handles.parse(t, {'foo': {'bar': 'hello'}})
+ t = """{{foo}}.{{wiz}}"""
+ assert "a.b" == handles.parse(t, {'foo': "a", 'wiz': "b"})
+ t = """Here's a line of text
+ followed by another line
+ followed by some {{foo}}.{{wiz}}
+ and then some at the end"""
+ assert """Here's a line of text
+ followed by another line
+ followed by some a.b
+ and then some at the end""" == handles.parse(t, {'foo': "a", 'wiz': "b"})
+
+
+def test_weird_chars():
+ t = "{{foo#_bar}}"
+ assert "hello" == handles.parse(t, {'foo#_bar': 'hello'})
+ t = "{{_foo$bar_}}"
+ assert "hello" == handles.parse(t, {'_foo$bar_': 'hello'})
+
+
+def test_conditional():
+ t = """{{#foo}}before{{foo:bar}}after{{/foo}}"""
+ assert "beforehelloafter" == handles.parse(t, {'foo': {'bar': 'hello'}})
+ assert "" == handles.parse(t, {'faa': {'bar': 'hello'}})
+
+ t = """{{#cond}}before{{foo:bar}}after{{/cond}}"""
+ assert "beforehelloafter" == handles.parse(t, {'foo': {'bar': 'hello'}, 'cond': True})
+ assert "" == handles.parse(t, {'foo': {'bar': 'hello'}, 'cond': False})
+
+
+def test_iteration():
+ t = """{{#foo}}!{{foo:bar}}!{{/foo}}"""
+ assert "!hello!!there!" == handles.parse(t, {'foo': [{'bar': 'hello'}, {'bar': 'there'}]})
+
+
+def test_result():
+ t = """{{obj}}
+ group g1 {{obj:id}}
+"""
+ assert """primitive d0 Dummy
+ group g1 d0
+""" == handles.parse(t, {'obj': handles.value({'id': 'd0'}, 'primitive d0 Dummy')})
+ assert "\n group g1 \n" == handles.parse(t, {})
+
+
+def test_result2():
+ t = """{{obj}}
+ group g1 {{obj:id}}
+{{#obj}}
+{{obj}}
+{{/obj}}
+"""
+ assert """primitive d0 Dummy
+ group g1 d0
+primitive d0 Dummy
+""" == handles.parse(t, {'obj': handles.value({'id': 'd0'}, 'primitive d0 Dummy')})
+ assert "\n group g1 \n" == handles.parse(t, {})
+
+
+def test_mustache():
+ t = """Hello {{name}}
+You have just won {{value}} dollars!
+{{#in_ca}}
+Well, {{taxed_value}} dollars, after taxes.
+{{/in_ca}}
+"""
+ v = {
+ "name": "Chris",
+ "value": 10000,
+ "taxed_value": 10000 - (10000 * 0.4),
+ "in_ca": True
+ }
+
+ assert """Hello Chris
+You have just won 10000 dollars!
+Well, 6000.0 dollars, after taxes.
+""" == handles.parse(t, v)
+
+
+def test_invert():
+ t = """{{#repo}}
+<b>{{name}}</b>
+{{/repo}}
+{{^repo}}
+No repos :(
+{{/repo}}
+"""
+ v = {
+ "repo": []
+ }
+
+ assert """
+No repos :(
+""" == handles.parse(t, v)
+
+
+def test_invert_2():
+ t = """foo
+{{#repo}}
+<b>{{name}}</b>
+{{/repo}}
+{{^repo}}
+No repos :(
+{{/repo}}
+bar
+"""
+ v = {
+ "repo": []
+ }
+
+ assert """foo
+No repos :(
+bar
+""" == handles.parse(t, v)
+
+
+def test_cib():
+ t = """{{filesystem}}
+{{exportfs}}
+{{rootfs}}
+{{virtual-ip}}
+clone c-{{rootfs:id}} {{rootfs:id}}
+group g-nfs
+ {{exportfs:id}}
+ {{virtual-ip:id}}
+order base-then-nfs inf: {{filesystem:id}} g-nfs
+colocation nfs-with-base inf: g-nfs {{filesystem:id}}
+order rootfs-before-nfs inf: c-{{rootfs:id}} g-nfs:start
+colocation nfs-with-rootfs inf: g-nfs c-{{rootfs:id}}
+"""
+ r = """primitive fs1 Filesystem
+primitive efs exportfs
+primitive rfs rootfs
+primitive vip IPaddr2
+ params ip=192.168.0.2
+clone c-rfs rfs
+group g-nfs
+ efs
+ vip
+order base-then-nfs inf: fs1 g-nfs
+colocation nfs-with-base inf: g-nfs fs1
+order rootfs-before-nfs inf: c-rfs g-nfs:start
+colocation nfs-with-rootfs inf: g-nfs c-rfs
+"""
+ v = {
+ 'filesystem': handles.value({'id': 'fs1'}, 'primitive fs1 Filesystem'),
+ 'exportfs': handles.value({'id': 'efs'}, 'primitive efs exportfs'),
+ 'rootfs': handles.value({'id': 'rfs'}, 'primitive rfs rootfs'),
+ 'virtual-ip': handles.value({'id': 'vip'},
+ 'primitive vip IPaddr2\n params ip=192.168.0.2'),
+ }
+ assert r == handles.parse(t, v)
diff --git a/test/unittests/test_lock.py b/test/unittests/test_lock.py
new file mode 100644
index 0000000..a8dc982
--- /dev/null
+++ b/test/unittests/test_lock.py
@@ -0,0 +1,277 @@
+"""
+Unitary tests for crmsh/lock.py
+
+:author: xinliang
+:organization: SUSE Linux GmbH
+:contact: XLiang@suse.de
+
+:since: 2020-12-18
+"""
+
+# pylint:disable=C0103,C0111,W0212,W0611
+
+import unittest
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import lock, config
+
+
+class TestLock(unittest.TestCase):
+ """
+ Unitary tests for crmsh.lock.Lock
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.local_inst = lock.Lock()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_run(self, mock_run):
+ mock_run.return_value = (0, "output data", None)
+ self.assertEqual(self.local_inst._run("test_cmd"), (0, "output data", None))
+ mock_run.assert_called_once_with("test_cmd")
+
+ @mock.patch('crmsh.lock.Lock._run')
+ def test_create_lock_dir_false(self, mock_run):
+ mock_run.return_value = (1, None, None)
+ rc = self.local_inst._create_lock_dir()
+ self.assertEqual(rc, False)
+ mock_run.assert_called_once_with("mkdir {}".format(lock.Lock.LOCK_DIR_DEFAULT))
+
+ @mock.patch('crmsh.lock.Lock._run')
+ def test_create_lock_dir(self, mock_run):
+ mock_run.return_value = (0, None, None)
+ rc = self.local_inst._create_lock_dir()
+ self.assertEqual(rc, True)
+ mock_run.assert_called_once_with("mkdir {}".format(lock.Lock.LOCK_DIR_DEFAULT))
+
+ @mock.patch('crmsh.lock.Lock._create_lock_dir')
+ def test_lock_or_fail(self, mock_create):
+ mock_create.return_value = False
+ with self.assertRaises(lock.ClaimLockError) as err:
+ self.local_inst._lock_or_fail()
+ self.assertEqual("Failed to claim lock (the lock directory exists at {})".format(lock.Lock.LOCK_DIR_DEFAULT), str(err.exception))
+ mock_create.assert_called_once_with()
+
+ @mock.patch('crmsh.lock.Lock._run')
+ def test_unlock(self, mock_run):
+ self.local_inst.lock_owner = True
+ self.local_inst._unlock()
+ mock_run.assert_called_once_with("rm -rf {}".format(lock.Lock.LOCK_DIR_DEFAULT))
+
+ @mock.patch('crmsh.lock.Lock._unlock')
+ @mock.patch('crmsh.lock.Lock._lock_or_fail')
+ def test_lock_exception(self, mock_lock, mock_unlock):
+ mock_lock.side_effect = lock.ClaimLockError
+
+ with self.assertRaises(lock.ClaimLockError):
+ with self.local_inst.lock():
+ pass
+
+ mock_lock.assert_called_once_with()
+ mock_unlock.assert_called_once_with()
+
+ @mock.patch('crmsh.lock.Lock._unlock')
+ @mock.patch('crmsh.lock.Lock._lock_or_fail')
+ def test_lock(self, mock_lock, mock_unlock):
+ with self.local_inst.lock():
+ pass
+ mock_lock.assert_called_once_with()
+ mock_unlock.assert_called_once_with()
+
+
+class TestRemoteLock(unittest.TestCase):
+ """
+ Unitary tests for crmsh.lock.RemoteLock
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.lock_inst = lock.RemoteLock("node1")
+ self.lock_inst_no_wait = lock.RemoteLock("node1", wait=False)
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_run_ssh_error(self, mock_run):
+ mock_run.return_value = (255, None, "ssh error")
+ with self.assertRaises(lock.SSHError) as err:
+ self.lock_inst._run("cmd")
+ self.assertEqual("ssh error", str(err.exception))
+ mock_run.assert_called_once_with("node1", "cmd")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
+ def test_run(self, mock_run):
+ mock_run.return_value = (0, None, None)
+ res = self.lock_inst._run("cmd")
+ self.assertEqual(res, mock_run.return_value)
+ mock_run.assert_called_once_with("node1", "cmd")
+
+ def test_lock_timeout_error_format(self):
+ config.core.lock_timeout = "pwd"
+ with self.assertRaises(ValueError) as err:
+ self.lock_inst.lock_timeout
+ self.assertEqual("Invalid format of core.lock_timeout(should be a number)", str(err.exception))
+
+ def test_lock_timeout_min_error(self):
+ config.core.lock_timeout = "12"
+ with self.assertRaises(ValueError) as err:
+ self.lock_inst.lock_timeout
+ self.assertEqual("Minimum value of core.lock_timeout should be 120", str(err.exception))
+
+ def test_lock_timeout(self):
+ config.core.lock_timeout = "130"
+ self.assertEqual(self.lock_inst.lock_timeout, 130)
+
+ @mock.patch('crmsh.lock.RemoteLock._run')
+ def test_get_online_nodelist_error(self, mock_run):
+ mock_run.return_value = (1, None, "error data")
+ with self.assertRaises(ValueError) as err:
+ self.lock_inst._get_online_nodelist()
+ self.assertEqual("error data", str(err.exception))
+ mock_run.assert_called_once_with("crm_node -l")
+
+ @mock.patch('crmsh.lock.RemoteLock._run')
+ def test_get_online_nodelist(self, mock_run):
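+ # crm_node -l prints "<id> <name> <state>" per node; only nodes in
+ # the "member" state count as online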
+ output = """
+ 1084783297 15sp2-1 member
+ 1084783193 15sp2-2 lost
+ 1084783331 15sp2-3 member
+ """
+ mock_run.return_value = (0, output, None)
+ res = self.lock_inst._get_online_nodelist()
+ self.assertEqual(res, ["15sp2-1", "15sp2-3"])
+ mock_run.assert_called_once_with("crm_node -l")
+
+ @mock.patch('crmsh.lock.Lock._create_lock_dir')
+ @mock.patch('crmsh.lock.RemoteLock.lock_timeout', new_callable=mock.PropertyMock)
+ @mock.patch('time.time')
+ def test_lock_or_wait_break(self, mock_time, mock_time_out, mock_create):
+ mock_time.return_value = 10000
+ mock_time_out.return_value = 120
+ mock_create.return_value = True
+
+ self.lock_inst._lock_or_wait()
+
+ mock_time.assert_called_once_with()
+ mock_time_out.assert_called_once_with()
+
+ @mock.patch('time.sleep')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.lock.RemoteLock._get_online_nodelist')
+ @mock.patch('crmsh.lock.Lock._create_lock_dir')
+ @mock.patch('crmsh.lock.RemoteLock.lock_timeout', new_callable=mock.PropertyMock)
+ @mock.patch('time.time')
+ def test_lock_or_wait_timed_out(self, mock_time, mock_time_out, mock_create,
+ mock_get_nodelist, mock_warn, mock_sleep):
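+ # time.time() jumps from 10000 to 10121 between the two checks,
+ # just past the 120s lock timeout, so the wait loop gives up and raises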
+ mock_time.side_effect = [10000, 10121]
+ mock_time_out.return_value = 120
+ mock_create.return_value = False
+ mock_get_nodelist.return_value = ["node2"]
+
+ with self.assertRaises(lock.ClaimLockError) as err:
+ self.lock_inst._lock_or_wait()
+ self.assertEqual("Timed out after 120 seconds. Cannot continue since the lock directory exists at the node (node1:{})".format(lock.Lock.LOCK_DIR_DEFAULT), str(err.exception))
+
+ mock_time.assert_has_calls([mock.call(), mock.call()])
+ mock_time_out.assert_has_calls([mock.call(), mock.call(), mock.call()])
+ mock_create.assert_called_once_with()
+ mock_get_nodelist.assert_called_once_with()
+ mock_warn.assert_called_once_with('Might have unfinished process on other nodes, wait %ss...', 120)
+ mock_sleep.assert_called_once_with(10)
+
+ @mock.patch('time.sleep')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.lock.RemoteLock._get_online_nodelist')
+ @mock.patch('crmsh.lock.Lock._create_lock_dir')
+ @mock.patch('crmsh.lock.RemoteLock.lock_timeout', new_callable=mock.PropertyMock)
+ @mock.patch('time.time')
+ def test_lock_or_wait_again(self, mock_time, mock_time_out, mock_create,
+ mock_get_nodelist, mock_warn, mock_sleep):
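+ # the lock directory already exists on the first two attempts while
+ # the remote node is still online, so the loop warns, sleeps and
+ # retries; the third attempt succeeds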
+ mock_time.side_effect = [10000, 10010, 10020]
+ mock_time_out.side_effect = [120, 120, 120]
+ mock_create.side_effect = [False, False, True]
+ mock_get_nodelist.side_effect = [["node1"], ["node1", "node2"]]
+
+ self.lock_inst._lock_or_wait()
+
+ mock_time.assert_has_calls([mock.call(), mock.call(), mock.call()])
+ mock_time_out.assert_has_calls([mock.call(), mock.call(), mock.call()])
+ mock_create.assert_has_calls([mock.call(), mock.call(), mock.call()])
+ mock_get_nodelist.assert_has_calls([mock.call(), mock.call()])
+ mock_warn.assert_called_once_with('Might have unfinished process on other nodes, wait %ss...', 120)
+ mock_sleep.assert_has_calls([mock.call(10), mock.call(10)])
+
+ @mock.patch('crmsh.lock.Lock._unlock')
+ @mock.patch('crmsh.lock.RemoteLock._lock_or_wait')
+ def test_lock_exception(self, mock_lock, mock_unlock):
+ mock_lock.side_effect = lock.ClaimLockError
+
+ with self.assertRaises(lock.ClaimLockError):
+ with self.lock_inst.lock():
+ pass
+
+ mock_lock.assert_called_once_with()
+ mock_unlock.assert_called_once_with()
+
+ @mock.patch('crmsh.lock.Lock._unlock')
+ @mock.patch('crmsh.lock.RemoteLock._lock_or_wait')
+ def test_lock(self, mock_lock, mock_unlock):
+ with self.lock_inst.lock():
+ pass
+ mock_lock.assert_called_once_with()
+ mock_unlock.assert_called_once_with()
+
+ @mock.patch('crmsh.lock.Lock._unlock')
+ @mock.patch('crmsh.lock.RemoteLock._lock_or_fail')
+ def test_lock_no_wait(self, mock_lock, mock_unlock):
+ with self.lock_inst_no_wait.lock():
+ pass
+ mock_lock.assert_called_once_with()
+ mock_unlock.assert_called_once_with()
diff --git a/test/unittests/test_objset.py b/test/unittests/test_objset.py
new file mode 100644
index 0000000..cae39ca
--- /dev/null
+++ b/test/unittests/test_objset.py
@@ -0,0 +1,40 @@
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+
+from crmsh import cibconfig
+
+factory = cibconfig.cib_factory
+
+
+def assert_in(needle, haystack):
+ if needle not in haystack:
+ message = "%s not in %s" % (needle, haystack)
+ raise AssertionError(message)
+
+
+def setup_function():
+ "set up test fixtures"
+ from crmsh import idmgmt
+ idmgmt.clear()
+
+
+def teardown_function():
+ pass
+
+
+def test_nodes_nocli():
+ for n in factory.node_id_list():
+ obj = factory.find_object(n)
+ if obj is not None:
+ assert obj.node is not None
+ assert True == obj.cli_use_validate()
+ assert False == obj.nocli
+
+
+def test_show():
+ setobj = cibconfig.mkset_obj()
+ s = setobj.repr_nopretty()
+ sp = s.splitlines()
+ assert_in("node ha-one", sp[0:3])
diff --git a/test/unittests/test_ocfs2.py b/test/unittests/test_ocfs2.py
new file mode 100644
index 0000000..603c68d
--- /dev/null
+++ b/test/unittests/test_ocfs2.py
@@ -0,0 +1,470 @@
+import logging
+import unittest
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+from crmsh import ocfs2, utils, ra, constants
+
+logging.basicConfig(level=logging.INFO)
+
+class TestOCFS2Manager(unittest.TestCase):
+ """
+ Unitary tests for crmsh.ocfs2.OCFS2Manager
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ context1 = mock.Mock(ocfs2_devices=[])
+ self.ocfs2_inst1 = ocfs2.OCFS2Manager(context1)
+
+ context2 = mock.Mock(ocfs2_devices=[],
+ stage="ocfs2",
+ yes_to_all=True)
+ self.ocfs2_inst2 = ocfs2.OCFS2Manager(context2)
+
+ context3 = mock.Mock(ocfs2_devices=["/dev/sdb2", "/dev/sdc2"],
+ use_cluster_lvm2=False)
+ self.ocfs2_inst3 = ocfs2.OCFS2Manager(context3)
+
+ context4 = mock.Mock(ocfs2_devices=[],
+ use_cluster_lvm2=True)
+ self.ocfs2_inst4 = ocfs2.OCFS2Manager(context4)
+
+ context5 = mock.Mock(ocfs2_devices=["/dev/sda2", "/dev/sda2"])
+ self.ocfs2_inst5 = ocfs2.OCFS2Manager(context5)
+
+ context6 = mock.Mock(ocfs2_devices=["/dev/sda2"],
+ mount_point="/data")
+ self.ocfs2_inst6 = ocfs2.OCFS2Manager(context6)
+
+ context7 = mock.Mock(ocfs2_devices=["/dev/sdb2"],
+ use_cluster_lvm2=True)
+ self.ocfs2_inst7 = ocfs2.OCFS2Manager(context7)
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_verify_packages(self, mock_installed):
+ mock_installed.side_effect = [True, False]
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst1._verify_packages(use_cluster_lvm2=True)
+ self.assertEqual("Missing required package for configuring OCFS2: lvm2-lockd", str(err.exception))
+ mock_installed.assert_has_calls([
+ mock.call("ocfs2-tools"),
+ mock.call("lvm2-lockd")
+ ])
+
+ def test_verify_options_stage_miss_option(self):
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst2._verify_options()
+ self.assertEqual("ocfs2 stage require -o option", str(err.exception))
+
+ def test_verify_options_two_devices(self):
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst3._verify_options()
+ self.assertEqual("Without Cluster LVM2 (-C option), -o option only support one device", str(err.exception))
+
+ def test_verify_options_only_C(self):
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst4._verify_options()
+ self.assertEqual("-C option only valid together with -o option", str(err.exception))
+
+ @mock.patch('crmsh.utils.has_mount_point_used')
+ def test_verify_options_mount(self, mock_mount):
+ mock_mount.return_value = True
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst6._verify_options()
+ self.assertEqual("Mount point /data already mounted", str(err.exception))
+ mock_mount.assert_called_once_with("/data")
+
+ @mock.patch('crmsh.utils.is_block_device')
+ def test_verify_devices_not_block(self, mock_is_block):
+ mock_is_block.return_value = False
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst3._verify_devices()
+ self.assertEqual("/dev/sdb2 doesn't look like a block device", str(err.exception))
+ mock_is_block.assert_called_once_with("/dev/sdb2")
+
+ @mock.patch('crmsh.utils.is_dev_used_for_lvm')
+ @mock.patch('crmsh.utils.is_block_device')
+ def test_verify_devices_lvm(self, mock_is_block, mock_lvm):
+ mock_lvm.return_value = True
+ mock_is_block.return_value = True
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst7._verify_devices()
+ self.assertEqual("/dev/sdb2 is a Logical Volume, cannot be used with the -C option", str(err.exception))
+ mock_is_block.assert_called_once_with("/dev/sdb2")
+ mock_lvm.assert_called_once_with("/dev/sdb2")
+
+ @mock.patch('crmsh.utils.has_disk_mounted')
+ @mock.patch('crmsh.utils.is_dev_used_for_lvm')
+ @mock.patch('crmsh.utils.is_block_device')
+ def test_verify_devices_mounted(self, mock_is_block, mock_lvm, mock_mounted):
+ mock_lvm.return_value = False
+ mock_is_block.return_value = True
+ mock_mounted.return_value = True
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst7._verify_devices()
+ self.assertEqual("/dev/sdb2 already mounted", str(err.exception))
+ mock_is_block.assert_called_once_with("/dev/sdb2")
+ mock_lvm.assert_called_once_with("/dev/sdb2")
+ mock_mounted.assert_called_once_with("/dev/sdb2")
+
+ def test_check_if_already_configured_return(self):
+ self.ocfs2_inst3._check_if_already_configured()
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_check_if_already_configured(self, mock_run, mock_info):
+ mock_run.return_value = "data xxx fstype=ocfs2 sss"
+ with self.assertRaises(utils.TerminateSubCommand):
+ self.ocfs2_inst2._check_if_already_configured()
+ mock_run.assert_called_once_with("crm configure show")
+ mock_info.assert_called_once_with("Already configured OCFS2 related resources")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_devices')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._check_if_already_configured')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_options')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_packages')
+ def test_static_verify(self, mock_verify_packages, mock_verify_options, mock_configured, mock_verify_devices):
+ self.ocfs2_inst3._static_verify()
+ mock_verify_packages.assert_called_once_with(False)
+ mock_verify_options.assert_called_once_with()
+ mock_configured.assert_called_once_with()
+ mock_verify_devices.assert_called_once_with()
+
+ def test_dynamic_raise_error(self):
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst2._dynamic_raise_error("error messages")
+ self.assertEqual("error messages", str(err.exception))
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_raise_error')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_device_from_config')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ def test_check_sbd_and_ocfs2_dev(self, mock_enabled, mock_get_device, mock_error):
+ mock_enabled.return_value = True
+ mock_get_device.return_value = ["/dev/sdb2"]
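+ # /dev/sdb2 appears in both the OCFS2 device list and the SBD
+ # configuration, which must be rejected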
+ self.ocfs2_inst3._check_sbd_and_ocfs2_dev()
+ mock_enabled.assert_called_once_with("sbd.service")
+ mock_get_device.assert_called_once_with()
+ mock_error.assert_called_once_with("/dev/sdb2 cannot be the same with SBD device")
+
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.utils.get_dev_fs_type')
+ @mock.patch('crmsh.utils.has_dev_partitioned')
+ def test_confirm_to_overwrite_ocfs2_dev(self, mock_has_parted, mock_fstype, mock_confirm):
+ mock_has_parted.side_effect = [True, False]
+ mock_fstype.return_value = "ext4"
+ mock_confirm.side_effect = [True, False]
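+ # declining the second confirmation aborts the subcommand before
+ # any device is wiped (contrast with the _confirmed variant below)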
+ with self.assertRaises(utils.TerminateSubCommand) as err:
+ self.ocfs2_inst3._confirm_to_overwrite_ocfs2_dev()
+ mock_has_parted.assert_has_calls([
+ mock.call("/dev/sdb2"),
+ mock.call("/dev/sdc2")
+ ])
+ mock_fstype.assert_called_once_with("/dev/sdc2")
+ mock_confirm.assert_has_calls([
+ mock.call("Found a partition table in /dev/sdb2 - Proceed anyway?"),
+ mock.call("/dev/sdc2 contains a ext4 file system - Proceed anyway?")
+ ])
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('crmsh.utils.get_dev_fs_type')
+ @mock.patch('crmsh.utils.has_dev_partitioned')
+ def test_confirm_to_overwrite_ocfs2_dev_confirmed(self, mock_has_parted, mock_fstype, mock_confirm, mock_run):
+ mock_has_parted.side_effect = [True, False]
+ mock_fstype.return_value = "ext4"
+ mock_confirm.side_effect = [True, True]
+ self.ocfs2_inst3._confirm_to_overwrite_ocfs2_dev()
+ mock_has_parted.assert_has_calls([
+ mock.call("/dev/sdb2"),
+ mock.call("/dev/sdc2")
+ ])
+ mock_fstype.assert_called_once_with("/dev/sdc2")
+ mock_confirm.assert_has_calls([
+ mock.call("Found a partition table in /dev/sdb2 - Proceed anyway?"),
+ mock.call("/dev/sdc2 contains a ext4 file system - Proceed anyway?")
+ ])
+ mock_run.assert_has_calls([
+ mock.call("wipefs -a /dev/sdb2"),
+ mock.call("wipefs -a /dev/sdc2")
+ ])
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_raise_error')
+ @mock.patch('crmsh.utils.has_stonith_running')
+ def test_dynamic_verify_error(self, mock_has_stonith, mock_error):
+ mock_has_stonith.return_value = False
+ mock_error.side_effect = SystemExit
+ with self.assertRaises(SystemExit):
+ self.ocfs2_inst3._dynamic_verify()
+ mock_has_stonith.assert_called_once_with()
+ mock_error.assert_called_once_with("OCFS2 requires stonith device configured and running")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._confirm_to_overwrite_ocfs2_dev')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._check_sbd_and_ocfs2_dev')
+ @mock.patch('crmsh.utils.has_stonith_running')
+ def test_dynamic_verify(self, mock_has_stonith, mock_check_dev, mock_confirm):
+ mock_has_stonith.return_value = True
+ self.ocfs2_inst3._dynamic_verify()
+ mock_has_stonith.assert_called_once_with()
+ mock_check_dev.assert_called_once_with()
+ mock_confirm.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.gen_unused_id')
+ def test_gen_ra_scripts(self, mock_gen_unused):
+ self.ocfs2_inst3.exist_ra_id_list = []
+ mock_gen_unused.return_value = "g1"
+ res = self.ocfs2_inst3._gen_ra_scripts("GROUP", {"id": "g1", "ra_string": "d vip"})
+ assert res == ("g1", "\ngroup g1 d vip")
+ mock_gen_unused.assert_called_once_with([], "g1")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.corosync.get_value')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ def test_mkfs(self, mock_long, mock_get_value, mock_run):
+ mock_get_value.return_value = "hacluster"
+ self.ocfs2_inst3._mkfs("/dev/sdb2")
+ mock_long.assert_called_once_with(" Creating OCFS2 filesystem for /dev/sdb2")
+ mock_get_value.assert_called_once_with("totem.cluster_name")
+ mock_run.assert_called_once_with("mkfs.ocfs2 --cluster-stack pcmk --cluster-name hacluster -N 8 -x /dev/sdb2")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_vg_change(self, mock_run):
+ self.ocfs2_inst3.vg_id = "vg1"
+ with self.ocfs2_inst3._vg_change():
+ pass
+ mock_run.assert_has_calls([
+ mock.call("vgchange -ay vg1"),
+ mock.call("vgchange -an vg1")
+ ])
+
+ @mock.patch('crmsh.utils.get_pe_number')
+ @mock.patch('crmsh.utils.gen_unused_id')
+ @mock.patch('crmsh.utils.get_all_vg_name')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ def test_create_lv(self, mock_long, mock_run, mock_all_vg, mock_unused, mock_pe_num):
+ mock_all_vg.return_value = []
+ mock_unused.return_value = "vg1"
+ mock_pe_num.return_value = 1234
+ res = self.ocfs2_inst3._create_lv()
+ self.assertEqual(res, "/dev/vg1/ocfs2-lv")
+ mock_run.assert_has_calls([
+ mock.call("pvcreate /dev/sdb2 /dev/sdc2 -y"),
+ mock.call("vgcreate --shared vg1 /dev/sdb2 /dev/sdc2 -y"),
+ mock.call("lvcreate -l 1234 vg1 -n ocfs2-lv -y")
+ ])
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
+ def test_gen_group_and_clone_scripts(self, mock_gen):
+ mock_gen.side_effect = [("id1", "group_script\n"), ("id2", "clone_script\n")]
+ res = self.ocfs2_inst3._gen_group_and_clone_scripts(["ra1", "ra2"])
+ self.assertEqual(res, "group_script\nclone_script\n")
+ mock_gen.assert_has_calls([
+ mock.call('GROUP', {'id': 'ocfs2-group', 'ra_string': 'ra1 ra2'}),
+ mock.call('CLONE', {'id': 'ocfs2-clone', 'group_id': 'id1'})
+ ])
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
+ def test_gen_fs_scripts(self, mock_gen):
+ mock_gen.return_value = "scripts"
+ self.ocfs2_inst3.mount_point = "/data"
+ self.ocfs2_inst3.target_device = "/dev/sda1"
+ res = self.ocfs2_inst3._gen_fs_scripts()
+ self.assertEqual(res, "scripts")
+ mock_gen.assert_called_once_with("Filesystem", {'id': 'ocfs2-clusterfs', 'mnt_point': '/data', 'fs_type': 'ocfs2', 'device': '/dev/sda1'})
+
+ @mock.patch('crmsh.bootstrap.wait_for_resource')
+ @mock.patch('crmsh.utils.append_res_to_group')
+ @mock.patch('crmsh.bootstrap.crm_configure_load')
+ def test_load_append_and_wait(self, mock_load, mock_append, mock_wait):
+ self.ocfs2_inst3.group_id = "g1"
+ self.ocfs2_inst3._load_append_and_wait("scripts", "res_id", "messages data")
+ mock_load.assert_called_once_with("update", "scripts")
+ mock_append.assert_called_once_with("g1", "res_id")
+ mock_wait.assert_called_once_with("messages data", "res_id")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_group_and_clone_scripts')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
+ def test_config_dlm(self, mock_gen_ra, mock_gen_group, mock_load_wait):
+ mock_gen_ra.return_value = ("dlm_id", "dlm_scripts\n")
+ mock_gen_group.return_value = "group_scripts\n"
+ self.ocfs2_inst3._config_dlm()
+ mock_gen_ra.assert_called_once_with("DLM", {"id": "ocfs2-dlm"})
+ mock_gen_group.assert_called_once_with(["dlm_id"])
+ mock_load_wait.assert_called_once_with("dlm_scripts\ngroup_scripts\n", "dlm_id", " Wait for DLM(dlm_id) start", need_append=False)
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
+ def test_config_lvmlockd(self, mock_gen_ra, mock_load_wait):
+ mock_gen_ra.return_value = ("ra_id", "ra_scripts\n")
+ self.ocfs2_inst3._config_lvmlockd()
+ mock_gen_ra.assert_called_once_with("LVMLockd", {"id": "ocfs2-lvmlockd"})
+ mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for LVMLockd(ra_id) start")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
+ def test_config_lvmactivate(self, mock_gen_ra, mock_load_wait):
+ mock_gen_ra.return_value = ("ra_id", "ra_scripts\n")
+ self.ocfs2_inst3.vg_id = "vg1"
+ self.ocfs2_inst3._config_lvmactivate()
+ mock_gen_ra.assert_called_once_with("LVMActivate", {"id": "ocfs2-lvmactivate", "vgname": "vg1"})
+ mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for LVMActivate(ra_id) start")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_fs_scripts')
+ @mock.patch('crmsh.utils.mkdirp')
+ def test_config_fs(self, mock_mkdir, mock_gen_fs, mock_load_wait):
+ mock_gen_fs.return_value = ("ra_id", "ra_scripts\n")
+ self.ocfs2_inst3.mount_point = "/data"
+ self.ocfs2_inst3._config_fs()
+ mock_mkdir.assert_called_once_with("/data")
+ mock_gen_fs.assert_called_once_with()
+ mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for Filesystem(ra_id) start")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_fs')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_lvmactivate')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._mkfs')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._vg_change')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._create_lv')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_lvmlockd')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_dlm')
+ def test_config_resource_stack_lvm2(self, mock_dlm, mock_lvmlockd, mock_lv, mock_vg, mock_mkfs, mock_lvmactivate, mock_fs):
+ mock_lv.return_value = "/dev/sda1"
+ self.ocfs2_inst3._config_resource_stack_lvm2()
+ mock_dlm.assert_called_once_with()
+ mock_lvmlockd.assert_called_once_with()
+ mock_lv.assert_called_once_with()
+ mock_mkfs.assert_called_once_with("/dev/sda1")
+ mock_lvmactivate.assert_called_once_with()
+ mock_fs.assert_called_once_with()
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_fs')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._mkfs')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_dlm')
+ def test_config_resource_stack_ocfs2_along(self, mock_dlm, mock_mkfs, mock_fs):
+ self.ocfs2_inst3._config_resource_stack_ocfs2_along()
+ mock_dlm.assert_called_once_with()
+ mock_mkfs.assert_called_once_with("/dev/sdb2")
+ mock_fs.assert_called_once_with()
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_resource_stack_lvm2')
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.utils.get_property')
+ @mock.patch('crmsh.utils.all_exist_id')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_verify')
+ @mock.patch('logging.Logger.info')
+ def test_init_ocfs2_lvm2(self, mock_status, mock_dynamic_verify, mock_all_id, mock_get, mock_set, mock_lvm2):
+ mock_all_id.return_value = []
+ mock_get.return_value = None
+ self.ocfs2_inst7.mount_point = "/data"
+ self.ocfs2_inst7.target_device = "/dev/vg1/lv1"
+ self.ocfs2_inst7.init_ocfs2()
+ mock_status.assert_has_calls([
+ mock.call("Configuring OCFS2"),
+ mock.call(' \'no-quorum-policy\' is changed to "freeze"'),
+ mock.call(' OCFS2 device %s mounted on %s', '/dev/vg1/lv1', '/data')
+ ])
+ mock_dynamic_verify.assert_called_once_with()
+ mock_all_id.assert_called_once_with()
+ mock_lvm2.assert_called_once_with()
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._config_resource_stack_ocfs2_along')
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.utils.get_property')
+ @mock.patch('crmsh.utils.all_exist_id')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_verify')
+ @mock.patch('logging.Logger.info')
+ def test_init_ocfs2(self, mock_status, mock_dynamic_verify, mock_all_id, mock_get, mock_set, mock_ocfs2):
+ mock_all_id.return_value = []
+ mock_get.return_value = None
+ self.ocfs2_inst3.mount_point = "/data"
+ self.ocfs2_inst3.target_device = "/dev/sda1"
+ self.ocfs2_inst3.init_ocfs2()
+ mock_status.assert_has_calls([
+ mock.call("Configuring OCFS2"),
+ mock.call(' \'no-quorum-policy\' is changed to "freeze"'),
+ mock.call(' OCFS2 device %s mounted on %s', '/dev/sda1', '/data')
+ ])
+ mock_dynamic_verify.assert_called_once_with()
+ mock_all_id.assert_called_once_with()
+ mock_ocfs2.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_find_target_on_join_none(self, mock_run):
+ mock_run.return_value = "data"
+ res = self.ocfs2_inst3._find_target_on_join("node1")
+ assert res is None
+ mock_run.assert_called_once_with("crm configure show", "node1")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_find_target_on_join_exception(self, mock_run):
+ mock_run.return_value = """
+params directory="/srv/clusterfs" fstype=ocfs2
+ """
+ with self.assertRaises(ValueError) as err:
+ self.ocfs2_inst3._find_target_on_join("node1")
+ self.assertEqual("Filesystem require configure device", str(err.exception))
+ mock_run.assert_called_once_with("crm configure show", "node1")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_find_target_on_join(self, mock_run):
+ mock_run.return_value = """
+params directory="/srv/clusterfs" fstype=ocfs2 device="/dev/sda2"
+ """
+ res = self.ocfs2_inst3._find_target_on_join("node1")
+ self.assertEqual(res, "/dev/sda2")
+ mock_run.assert_called_once_with("crm configure show", "node1")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._find_target_on_join')
+ def test_join_ocfs2_return(self, mock_find):
+ mock_find.return_value = None
+ self.ocfs2_inst3.join_ocfs2("node1")
+ mock_find.assert_called_once_with("node1")
+
+ @mock.patch('crmsh.utils.compare_uuid_with_peer_dev')
+ @mock.patch('crmsh.utils.is_dev_a_plain_raw_disk_or_partition')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_packages')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._find_target_on_join')
+ def test_join_ocfs2(self, mock_find, mock_long, mock_parser, mock_verify_packages, mock_is_mapper, mock_compare):
+ mock_find.return_value = "/dev/sda2"
+ mock_parser("node1").is_resource_configured.return_value = False
+ mock_is_mapper.return_value = True
+ self.ocfs2_inst3.join_ocfs2("node1")
+ mock_find.assert_called_once_with("node1")
+ mock_verify_packages.assert_called_once_with(False)
+ mock_is_mapper.assert_called_once_with("/dev/sda2", "node1")
+ mock_compare.assert_called_once_with(["/dev/sda2"], "node1")
+
+ @mock.patch('crmsh.ocfs2.OCFS2Manager._static_verify')
+ def test_verify_ocfs2(self, mock_static_verify):
+ context1 = mock.Mock(ocfs2_devices=[])
+ ocfs2.OCFS2Manager.verify_ocfs2(context1)
+ mock_static_verify.assert_called_once_with()
diff --git a/test/unittests/test_parallax.py b/test/unittests/test_parallax.py
new file mode 100644
index 0000000..b934d91
--- /dev/null
+++ b/test/unittests/test_parallax.py
@@ -0,0 +1,106 @@
+from __future__ import unicode_literals
+# Copyright (C) 2019 Xin Liang <XLiang@suse.com>
+# See COPYING for license information.
+#
+# unit tests for parallax.py
+
+
+import unittest
+from unittest import mock
+
+import crmsh.parallax
+import crmsh.prun.prun
+
+
+class TestParallax(unittest.TestCase):
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ # no shared fixture here; each test patches its own mocks
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_call(self, mock_prun: mock.MagicMock):
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.ProcessResult(0, None, None)
+ }
+ result = crmsh.parallax.parallax_call(["node1"], "ls")
+ self.assertEqual(
+ result,
+ [("node1", (0, None, None))],
+ )
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_call_non_zero_exit_code(self, mock_prun: mock.MagicMock):
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.ProcessResult(1, None, None)
+ }
+ with self.assertRaises(ValueError):
+ crmsh.parallax.parallax_call(["node1"], "ls")
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_call_255_exit_code(self, mock_prun: mock.MagicMock):
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.ProcessResult(255, None, None)
+ }
+ with self.assertRaises(ValueError):
+ crmsh.parallax.parallax_call(["node1"], "ls")
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_run(self, mock_prun: mock.MagicMock):
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.ProcessResult(0, None, None)
+ }
+ result = crmsh.parallax.parallax_run(["node1"], "ls")
+ self.assertEqual(
+ {"node1": (0, None, None)},
+ result,
+ )
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_run_non_zero_exit_code(self, mock_prun: mock.MagicMock):
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.ProcessResult(1, None, None)
+ }
+ result = crmsh.parallax.parallax_run(["node1"], "ls")
+ self.assertEqual(
+ {"node1": (1, None, None)},
+ result,
+ )
+
+ @mock.patch("crmsh.prun.prun.prun")
+ def test_run_255_exit_code(self, mock_prun: mock.MagicMock):
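+ # prun represents an ssh transport failure (conventionally exit
+ # code 255) as an SSHError result, which parallax_run raises as ValueError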
+ mock_prun.return_value = {
+ "node1": crmsh.prun.prun.SSHError("alice", "node1", "foo")
+ }
+ with self.assertRaises(ValueError):
+ crmsh.parallax.parallax_run(["node1"], "ls")
+
+ @mock.patch("crmsh.prun.prun.pfetch_from_remote")
+ def test_slurp(self, mock_pfetch: mock.MagicMock):
+ mock_pfetch.return_value = {"node1": "/opt/node1/file.c"}
+ results = crmsh.parallax.parallax_slurp(["node1"], "/opt", "/opt/file.c")
+ self.assertListEqual([("node1", "/opt/node1/file.c")], results)
+ mock_pfetch.assert_called_once_with(["node1"], "/opt/file.c", "/opt")
+
+ @mock.patch("crmsh.prun.prun.pfetch_from_remote")
+ def test_slurp_exception(self, mock_pfetch: mock.MagicMock):
+ mock_pfetch.return_value = {"node1": crmsh.prun.prun.PRunError("alice", "node1", "foo")}
+ with self.assertRaises(ValueError):
+ crmsh.parallax.parallax_slurp(["node1"], "/opt", "/opt/file.c")
+ mock_pfetch.assert_called_once_with(["node1"], "/opt/file.c", "/opt")
+
+ @mock.patch("crmsh.prun.prun.pcopy_to_remote")
+ def test_copy(self, mock_pcopy: mock.MagicMock):
+ mock_pcopy.return_value = {"node1": None, "node2": None}
+ crmsh.parallax.parallax_copy(["node1", "node2"], "/opt/file.c", "/tmp")
+ mock_pcopy.assert_called_once_with("/opt/file.c", ["node1", "node2"], "/tmp", False, timeout_seconds=-1)
+
+ @mock.patch("crmsh.prun.prun.pcopy_to_remote")
+ def test_copy_exception(self, mock_pcopy: mock.MagicMock):
+ mock_pcopy.return_value = {"node1": crmsh.prun.prun.PRunError("alice", "node1", "foo"), "node2": None}
+ with self.assertRaises(ValueError):
+ crmsh.parallax.parallax_copy(["node1", "node2"], "/opt/file.c", "/tmp")
+ mock_pcopy.assert_called_once_with("/opt/file.c", ["node1", "node2"], "/tmp", False, timeout_seconds=-1)
diff --git a/test/unittests/test_parse.py b/test/unittests/test_parse.py
new file mode 100644
index 0000000..27b26b9
--- /dev/null
+++ b/test/unittests/test_parse.py
@@ -0,0 +1,754 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+#
+# unit tests for parse.py
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from builtins import zip
+from crmsh import parse
+import unittest
+import shlex
+from crmsh.utils import lines2cli
+from crmsh.xmlutil import xml_tostring
+from lxml import etree
+
+
+def test_score_to_kind():
+ assert parse.score_to_kind("0") == "Optional"
+ assert parse.score_to_kind("INFINITY") == "Mandatory"
+ assert parse.score_to_kind("200") == "Mandatory"
+
+
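+# MockValidation stubs out parse.Validation so the parser tests run
+# against a fixed vocabulary of roles, actions, date ops and order
+# kinds, independent of the schema installed on the test host.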
+class MockValidation(parse.Validation):
+ def resource_roles(self):
+ return ['Master', 'Slave', 'Started']
+
+ def resource_actions(self):
+ return ['start', 'stop', 'promote', 'demote']
+
+ def date_ops(self):
+ return ['lt', 'gt', 'in_range', 'date_spec']
+
+ def expression_types(self):
+ return ['normal', 'string', 'number']
+
+ def rsc_order_kinds(self):
+ return ['Mandatory', 'Optional', 'Serialize']
+
+ def op_attributes(self):
+ return ['id', 'name', 'interval', 'timeout', 'description',
+ 'start-delay', 'interval-origin', 'timeout', 'enabled',
+ 'record-pending', 'role', 'requires', 'on-fail']
+
+ def acl_2_0(self):
+ return True
+
+
+class TestBaseParser(unittest.TestCase):
+ def setUp(self):
+ self.base = parse.BaseParser()
+
+ def _reset(self, cmd):
+ self.base._cmd = shlex.split(cmd)
+ self.base._currtok = 0
+
+ @mock.patch('logging.Logger.error')
+ def test_err(self, mock_err):
+ self._reset('a:b:c:d')
+
+ def runner():
+ self.base.match_split()
+ self.assertRaises(parse.ParseError, runner)
+
+ @mock.patch('logging.Logger.error')
+ def test_idspec(self, mock_error):
+ self._reset('$id=foo')
+ self.base.match_idspec()
+ self.assertEqual(self.base.matched(1), '$id')
+ self.assertEqual(self.base.matched(2), 'foo')
+
+ self._reset('$id-ref=foo')
+ self.base.match_idspec()
+ self.assertEqual(self.base.matched(1), '$id-ref')
+ self.assertEqual(self.base.matched(2), 'foo')
+
+ def runner():
+ self._reset('id=foo')
+ self.base.match_idspec()
+ self.assertRaises(parse.ParseError, runner)
+
+ def test_match_split(self):
+ self._reset('resource:role')
+ a, b = self.base.match_split()
+ self.assertEqual(a, 'resource')
+ self.assertEqual(b, 'role')
+
+ self._reset('role')
+ a, b = self.base.match_split()
+ self.assertEqual(a, 'role')
+ self.assertEqual(b, None)
+
+ def test_description(self):
+ self._reset('description="this is a description"')
+ self.assertEqual(self.base.try_match_description(), 'this is a description')
+
+ def test_nvpairs(self):
+ self._reset('foo=bar wiz="fizz buzz" bug= bug2=')
+ ret = self.base.match_nvpairs()
+ self.assertEqual(len(ret), 4)
+ retdict = dict([(r.get('name'), r.get('value')) for r in ret])
+ self.assertEqual(retdict['foo'], 'bar')
+ self.assertEqual(retdict['bug'], '')
+ self.assertEqual(retdict['wiz'], 'fizz buzz')
+
+
+class TestCliParser(unittest.TestCase):
+ def setUp(self):
+ parse.validator = MockValidation()
+ self.comments = []
+
+ def _parse(self, s):
+ return parse.parse(s, comments=self.comments)
+
+ @mock.patch('logging.Logger.error')
+ def test_node(self, mock_error):
+ out = self._parse('node node-1')
+ self.assertEqual(out.get('uname'), 'node-1')
+
+ out = self._parse('node $id=testid node-1')
+ self.assertEqual(out.get('id'), 'testid')
+ self.assertEqual(out.get('uname'), 'node-1')
+
+ out = self._parse('node 1: node-1')
+ self.assertEqual(out.get('id'), '1')
+ self.assertEqual(out.get('uname'), 'node-1')
+
+ out = self._parse('node testid: node-1')
+ self.assertEqual(out.get('id'), 'testid')
+ self.assertEqual(out.get('uname'), 'node-1')
+
+ out = self._parse('node $id=testid node-1:ping')
+ self.assertEqual(out.get('id'), 'testid')
+ self.assertEqual(out.get('uname'), 'node-1')
+ self.assertEqual(out.get('type'), 'ping')
+
+ out = self._parse('node node-1:unknown')
+ self.assertFalse(out)
+
+ out = self._parse('node node-1 description="foo bar" attributes foo=bar')
+ self.assertEqual(out.get('description'), 'foo bar')
+ self.assertEqual(['bar'], out.xpath('instance_attributes/nvpair[@name="foo"]/@value'))
+
+ out = self._parse('node node-1 attributes foo=bar utilization wiz=bang')
+ self.assertEqual(['bar'], out.xpath('instance_attributes/nvpair[@name="foo"]/@value'))
+ self.assertEqual(['bang'], out.xpath('utilization/nvpair[@name="wiz"]/@value'))
+
+ @mock.patch('logging.Logger.error')
+ def test_resources(self, mock_error):
+ out = self._parse('primitive www ocf:heartbeat:apache op monitor timeout=10s')
+ self.assertEqual(out.get('id'), 'www')
+ self.assertEqual(out.get('class'), 'ocf')
+ self.assertEqual(['monitor'], out.xpath('//op/@name'))
+
+ out = self._parse('rsc_template public_vm ocf:heartbeat:Xen op start timeout=300s op stop timeout=300s op monitor interval=30s timeout=60s op migrate_from timeout=600s op migrate_to timeout=600s')
+ self.assertEqual(out.get('id'), 'public_vm')
+ self.assertEqual(out.get('class'), 'ocf')
+ #print out
+
+ out = self._parse('primitive st stonith:ssh params hostlist=node1 meta target-role=Started requires=nothing op start timeout=60s op monitor interval=60m timeout=60s')
+ self.assertEqual(out.get('id'), 'st')
+
+ out2 = self._parse('primitive st stonith:ssh hostlist=node1 meta target-role=Started requires=nothing op start timeout=60s op monitor interval=60m timeout=60s')
+ self.assertEqual(out2.get('id'), 'st')
+
+ self.assertEqual(xml_tostring(out), xml_tostring(out2))
+
+ out = self._parse('primitive st stonith:ssh params hostlist= meta')
+ self.assertEqual(out.get('id'), 'st')
+
+ out = self._parse('primitive st stonith:null params hostlist=node1 meta requires=nothing description="some description here" op start op monitor interval=60m')
+ self.assertEqual(out.get('id'), 'st')
+
+ out = self._parse('ms m0 resource params a=b')
+ self.assertEqual(out.get('id'), 'm0')
+ print(xml_tostring(out))
+ self.assertEqual(['resource'], out.xpath('./crmsh-ref/@id'))
+ self.assertEqual(['b'], out.xpath('instance_attributes/nvpair[@name="a"]/@value'))
+
+ out2 = self._parse('ms m0 resource a=b')
+ self.assertEqual(out.get('id'), 'm0')
+ self.assertEqual(xml_tostring(out), xml_tostring(out2))
+
+ out = self._parse('master ma resource meta a=b')
+ self.assertEqual(out.get('id'), 'ma')
+ self.assertEqual(['resource'], out.xpath('./crmsh-ref/@id'))
+ self.assertEqual(['b'], out.xpath('meta_attributes/nvpair[@name="a"]/@value'))
+
+ out = self._parse('clone clone-1 resource meta a=b')
+ self.assertEqual(out.get('id'), 'clone-1')
+ self.assertEqual(['resource'], out.xpath('./crmsh-ref/@id'))
+ self.assertEqual(['b'], out.xpath('meta_attributes/nvpair[@name="a"]/@value'))
+
+ out = self._parse('group group-1 a')
+ self.assertEqual(out.get('id'), 'group-1')
+ self.assertEqual(len(out), 1)
+
+ out = self._parse('group group-1 a b c')
+ self.assertEqual(len(out), 3)
+
+ out = self._parse('group group-1')
+ self.assertFalse(out)
+
+ out = self._parse('group group-1 params a=b')
+ self.assertEqual(len(out), 1)
+ self.assertEqual(['b'], out.xpath('/group/instance_attributes/nvpair[@name="a"]/@value'))
+
+ def test_heartbeat_class(self):
+ out = self._parse('primitive p_node-activate heartbeat:node-activate')
+ self.assertEqual(out.get('id'), 'p_node-activate')
+ self.assertEqual(out.get('class'), 'heartbeat')
+ self.assertEqual(out.get('provider'), None)
+ self.assertEqual(out.get('type'), 'node-activate')
+
+
+ def test_nvpair_ref(self):
+ out = self._parse('primitive dummy-0 Dummy params @foo')
+ self.assertEqual(out.get('id'), 'dummy-0')
+ self.assertEqual(out.get('class'), 'ocf')
+ self.assertEqual(['foo'], out.xpath('.//nvpair/@id-ref'))
+
+ out = self._parse('primitive dummy-0 Dummy params @fiz:buz')
+ self.assertEqual(out.get('id'), 'dummy-0')
+ self.assertEqual(out.get('class'), 'ocf')
+ self.assertEqual(['fiz'], out.xpath('.//nvpair/@id-ref'))
+ self.assertEqual(['buz'], out.xpath('.//nvpair/@name'))
+
+ @mock.patch('logging.Logger.error')
+ def test_location(self, mock_error):
+ out = self._parse('location loc-1 resource inf: foo')
+ self.assertEqual(out.get('id'), 'loc-1')
+ self.assertEqual(out.get('rsc'), 'resource')
+ self.assertEqual(out.get('score'), 'INFINITY')
+ self.assertEqual(out.get('node'), 'foo')
+
+ out = self._parse('location loc-1 /foo.*/ inf: bar')
+ self.assertEqual(out.get('id'), 'loc-1')
+ self.assertEqual(out.get('rsc-pattern'), 'foo.*')
+ self.assertEqual(out.get('score'), 'INFINITY')
+ self.assertEqual(out.get('node'), 'bar')
+ #print out
+
+ out = self._parse('location loc-1 // inf: bar')
+ self.assertFalse(out)
+
+ out = self._parse('location loc-1 { one ( two three ) four } inf: bar')
+ self.assertEqual(out.get('id'), 'loc-1')
+ self.assertEqual(['one', 'two', 'three', 'four'], out.xpath('//resource_ref/@id'))
+ self.assertEqual(out.get('score'), 'INFINITY')
+ self.assertEqual(out.get('node'), 'bar')
+ #print out
+
+ out = self._parse('location loc-1 thing rule role=slave -inf: #uname eq madrid')
+ self.assertEqual(out.get('id'), 'loc-1')
+ self.assertEqual(out.get('rsc'), 'thing')
+ self.assertEqual(out.get('score'), None)
+
+ out = self._parse('location l { a:foo b:bar }')
+ self.assertFalse(out)
+
+ @mock.patch('logging.Logger.error')
+ def test_colocation(self, mock_error):
+ out = self._parse('colocation col-1 inf: foo:master ( bar wiz sequential=yes )')
+ self.assertEqual(out.get('id'), 'col-1')
+ self.assertEqual(['foo', 'bar', 'wiz'], out.xpath('//resource_ref/@id'))
+ self.assertEqual([], out.xpath('//resource_set[@name="sequential"]/@value'))
+
+ out = self._parse(
+ 'colocation col-1 -20: foo:Master ( bar wiz ) ( zip zoo ) node-attribute="fiz"')
+ self.assertEqual(out.get('id'), 'col-1')
+ self.assertEqual(out.get('score'), '-20')
+ self.assertEqual(['foo', 'bar', 'wiz', 'zip', 'zoo'], out.xpath('//resource_ref/@id'))
+ self.assertEqual(['fiz'], out.xpath('//@node-attribute'))
+
+ out = self._parse('colocation col-1 0: a:master b')
+ self.assertEqual(out.get('id'), 'col-1')
+
+ out = self._parse('colocation col-1 10: ) bar wiz')
+ self.assertFalse(out)
+
+ out = self._parse('colocation col-1 10: ( bar wiz')
+ self.assertFalse(out)
+
+ out = self._parse('colocation col-1 10: ( bar wiz ]')
+ self.assertFalse(out)
+
+ def test_order(self):
+ out = self._parse('order o1 Mandatory: [ A B sequential=true ] C')
+ print(xml_tostring(out))
+ self.assertEqual(['Mandatory'], out.xpath('/rsc_order/@kind'))
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(['false'], out.xpath('/rsc_order/resource_set/@require-all'))
+ self.assertEqual(['A', 'B', 'C'], out.xpath('//resource_ref/@id'))
+
+ out = self._parse('order o1 Mandatory: [ A B sequential=false ] C')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ #self.assertTrue(['require-all', 'false'] in out.resources[0][1])
+ #self.assertTrue(['sequential', 'false'] in out.resources[0][1])
+ self.assertEqual(out.get('id'), 'o1')
+
+ out = self._parse('order o1 Mandatory: A B C sequential=false')
+ self.assertEqual(1, len(out.xpath('/rsc_order/resource_set')))
+ #self.assertTrue(['sequential', 'false'] in out.resources[0][1])
+ self.assertEqual(out.get('id'), 'o1')
+
+ out = self._parse('order o1 Mandatory: A B C sequential=true')
+ self.assertEqual(1, len(out.xpath('/rsc_order/resource_set')))
+ #self.assertTrue(['sequential', 'true'] not in out.resources[0][1])
+ self.assertEqual(out.get('id'), 'o1')
+
+ out = self._parse('order c_apache_1 Mandatory: apache:start ip_1')
+ self.assertEqual(out.get('id'), 'c_apache_1')
+
+ out = self._parse('order c_apache_2 Mandatory: apache:start ip_1 ip_2 ip_3')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'c_apache_2')
+
+ out = self._parse('order o1 Serialize: A ( B C )')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'o1')
+
+ out = self._parse('order o1 Serialize: A ( B C ) symmetrical=false')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'o1')
+ self.assertEqual(['false'], out.xpath('//@symmetrical'))
+
+ out = self._parse('order o1 Serialize: A ( B C ) symmetrical=true')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'o1')
+ self.assertEqual(['true'], out.xpath('//@symmetrical'))
+
+ inp = 'colocation rsc_colocation-master INFINITY: [ vip-master vip-rep sequential=true ] [ msPostgresql:Master sequential=true ]'
+ out = self._parse(inp)
+ self.assertEqual(2, len(out.xpath('/rsc_colocation/resource_set')))
+ self.assertEqual(out.get('id'), 'rsc_colocation-master')
+
+ out = self._parse('order order_2 Mandatory: [ A B ] C')
+ self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
+ self.assertEqual(out.get('id'), 'order_2')
+ self.assertEqual(['Mandatory'], out.xpath('/rsc_order/@kind'))
+ self.assertEqual(['false'], out.xpath('//resource_set/@sequential'))
+
+ out = self._parse('order order-1 Optional: group1:stop group2:start')
+ self.assertEqual(out.get('id'), 'order-1')
+ self.assertEqual(['Optional'], out.xpath('/rsc_order/@kind'))
+ self.assertEqual(['group1'], out.xpath('/rsc_order/@first'))
+ self.assertEqual(['stop'], out.xpath('/rsc_order/@first-action'))
+ self.assertEqual(['group2'], out.xpath('/rsc_order/@then'))
+ self.assertEqual(['start'], out.xpath('/rsc_order/@then-action'))
+
+ def test_ticket(self):
+ out = self._parse('rsc_ticket ticket-A_public-ip ticket-A: public-ip')
+ self.assertEqual(out.get('id'), 'ticket-A_public-ip')
+
+ out = self._parse('rsc_ticket ticket-A_bigdb ticket-A: bigdb loss-policy=fence')
+ self.assertEqual(out.get('id'), 'ticket-A_bigdb')
+
+ out = self._parse(
+ 'rsc_ticket ticket-B_storage ticket-B: drbd-a:Master drbd-b:Master')
+ self.assertEqual(out.get('id'), 'ticket-B_storage')
+
+ @mock.patch('logging.Logger.error')
+ def test_bundle(self, mock_error):
+ out = self._parse('bundle httpd docker image=pcmk:httpd replicas=3 network ip-range-start=10.10.10.123 host-netmask=24 port-mapping port=80 storage storage-mapping target-dir=/var/www/html source-dir=/srv/www options=rw primitive httpd-apache')
+ self.assertEqual(out.get('id'), 'httpd')
+ self.assertEqual(['pcmk:httpd'], out.xpath('/bundle/docker/@image'))
+ self.assertEqual(['httpd-apache'], out.xpath('/bundle/crmsh-ref/@id'))
+
+ out = self._parse('bundle httpd docker image=pcmk:httpd primitive httpd-apache apache')
+ self.assertFalse(out)
+
+ @mock.patch('logging.Logger.error')
+ def test_op(self, mock_error):
+ out = self._parse('monitor apache:Master 10s:20s')
+ self.assertEqual(out.get('rsc'), 'apache')
+ self.assertEqual(out.get('role'), 'Master')
+ self.assertEqual(out.get('interval'), '10s')
+ self.assertEqual(out.get('timeout'), '20s')
+
+ out = self._parse('monitor apache 60m')
+ self.assertEqual(out.get('rsc'), 'apache')
+ self.assertEqual(out.get('role'), None)
+ self.assertEqual(out.get('interval'), '60m')
+
+ out = self._parse('primitive rsc_dummy1 Dummy op monitor interval=10 OCF_CHECK_LEVEL=10 timeout=60')
+ # incorrect ordering of attributes
+ self.assertFalse(out)
+
+ @mock.patch('logging.Logger.error')
+ def test_acl(self, mock_error):
+ out = self._parse('role user-1 error')
+ self.assertFalse(out)
+ out = self._parse('user user-1 role:user-1')
+ self.assertNotEqual(out, False)
+
+ out = self._parse("role bigdb_admin " +
+ "write meta:bigdb:target-role " +
+ "write meta:bigdb:is-managed " +
+ "write location:bigdb " +
+ "read ref:bigdb")
+ self.assertEqual(4, len(out))
+
+ # new-style (2.0) ACL syntax
+
+ out = self._parse("acl_target foo a")
+ self.assertEqual('acl_target', out.tag)
+ self.assertEqual('foo', out.get('id'))
+ self.assertEqual(['a'], out.xpath('./role/@id'))
+
+ out = self._parse("acl_target foo a b")
+ self.assertEqual('acl_target', out.tag)
+ self.assertEqual('foo', out.get('id'))
+ self.assertEqual(['a', 'b'], out.xpath('./role/@id'))
+
+ out = self._parse("acl_target foo a b c")
+ self.assertEqual('acl_target', out.tag)
+ self.assertEqual('foo', out.get('id'))
+ self.assertEqual(['a', 'b', 'c'], out.xpath('./role/@id'))
+ out = self._parse("acl_group fee a b c")
+ self.assertEqual('acl_group', out.tag)
+ self.assertEqual('fee', out.get('id'))
+ self.assertEqual(['a', 'b', 'c'], out.xpath('./role/@id'))
+ out = self._parse('role fum description="test" read a: description="test2" xpath:*[@name=\\"karl\\"]')
+ self.assertEqual(['*[@name="karl"]'], out.xpath('/acl_role/acl_permission/@xpath'))
+
+ def test_xml(self):
+ out = self._parse('xml <node uname="foo-1"/>')
+ self.assertEqual('node', out.tag)
+ self.assertEqual('foo-1', out.get('uname'))
+
+ @mock.patch('logging.Logger.error')
+ def test_property(self, mock_error):
+ out = self._parse('property stonith-enabled=true')
+ self.assertEqual(['true'], out.xpath('//nvpair[@name="stonith-enabled"]/@value'))
+
+ # missing score
+ out = self._parse('property rule #uname eq node1 stonith-enabled=no')
+ self.assertEqual(['INFINITY'], out.xpath('//@score'))
+
+ out = self._parse('property rule 10: #uname eq node1 stonith-enabled=no')
+ self.assertEqual(['no'], out.xpath('//nvpair[@name="stonith-enabled"]/@value'))
+ self.assertEqual(['node1'], out.xpath('//expression[@attribute="#uname"]/@value'))
+
+ out = self._parse('property rule +inf: date spec years=2014 stonith-enabled=no')
+ self.assertEqual(['no'], out.xpath('//nvpair[@name="stonith-enabled"]/@value'))
+ self.assertEqual(['2014'], out.xpath('//date_spec/@years'))
+
+ out = self._parse('rsc_defaults failure-timeout=3m')
+ self.assertEqual(['3m'], out.xpath('//nvpair[@name="failure-timeout"]/@value'))
+
+ out = self._parse('rsc_defaults foo: failure-timeout=3m')
+ self.assertEqual('foo', out[0].get('id'))
+ self.assertEqual(['3m'], out.xpath('//nvpair[@name="failure-timeout"]/@value'))
+
+ out = self._parse('rsc_defaults failure-timeout=3m foo:')
+ self.assertEqual(False, out)
+
+ def test_empty_property_sets(self):
+ out = self._parse('rsc_defaults defaults:')
+ self.assertEqual('<rsc_defaults><meta_attributes id="defaults"/></rsc_defaults>',
+ xml_tostring(out))
+
+ out = self._parse('op_defaults defaults:')
+ self.assertEqual('<op_defaults><meta_attributes id="defaults"/></op_defaults>',
+ xml_tostring(out))
+
+ def test_fencing(self):
+ # the test environment defines three nodes
+
+ out = self._parse('fencing_topology')
+ expect = '<fencing-topology/>'
+ self.assertEqual(expect, xml_tostring(out))
+
+ out = self._parse('fencing_topology poison-pill power')
+ expect = '<fencing-topology><fencing-level target="ha-one" index="1" devices="poison-pill"/><fencing-level target="ha-one" index="2" devices="power"/><fencing-level target="ha-three" index="1" devices="poison-pill"/><fencing-level target="ha-three" index="2" devices="power"/><fencing-level target="ha-two" index="1" devices="poison-pill"/><fencing-level target="ha-two" index="2" devices="power"/></fencing-topology>'
+ self.assertEqual(expect, xml_tostring(out))
+
+ out = self._parse('fencing_topology node-a: poison-pill power node-b: ipmi serial')
+ self.assertEqual(4, len(out))
+
+ devs = ['stonith-vbox3-1-off', 'stonith-vbox3-2-off',
+ 'stonith-vbox3-1-on', 'stonith-vbox3-2-on']
+ out = self._parse('fencing_topology vbox4: %s' % ','.join(devs))
+ print(xml_tostring(out))
+ self.assertEqual(1, len(out))
+
+ def test_fencing_1114(self):
+ """
+ Test node attribute fence target assignment
+ """
+ out = self._parse('fencing_topology attr:rack=1 poison-pill power')
+ expect = """<fencing-topology><fencing-level index="1" devices="poison-pill" target-attribute="rack" target-value="1"/><fencing-level index="2" devices="power" target-attribute="rack" target-value="1"/></fencing-topology>"""
+ self.assertEqual(expect, xml_tostring(out))
+
+ out = self._parse('fencing_topology attr:rack=1 poison-pill,power')
+ expect = '<fencing-topology><fencing-level index="1" devices="poison-pill,power" target-attribute="rack" target-value="1"/></fencing-topology>'
+ self.assertEqual(expect, xml_tostring(out))
+
+ @mock.patch('logging.Logger.error')
+ def test_tag(self, mock_error):
+ out = self._parse('tag tag1: one two three')
+ self.assertEqual(out.get('id'), 'tag1')
+ self.assertEqual(['one', 'two', 'three'], out.xpath('/tag/obj_ref/@id'))
+
+ out = self._parse('tag tag1:')
+ self.assertFalse(out)
+
+ out = self._parse('tag tag1:: foo')
+ self.assertFalse(out)
+
+ out = self._parse('tag tag1 foo bar')
+ self.assertEqual(out.get('id'), 'tag1')
+ self.assertEqual(['foo', 'bar'], out.xpath('/tag/obj_ref/@id'))
+
+ def test_alerts(self):
+ "Test alerts (1.1.15+)"
+ out = self._parse('alert alert1 /tmp/foo.sh to /tmp/bar.log')
+ self.assertEqual(out.get('id'), 'alert1')
+ self.assertEqual(['/tmp/foo.sh'],
+ out.xpath('/alert/@path'))
+ self.assertEqual(['/tmp/bar.log'],
+ out.xpath('/alert/recipient/@value'))
+
+ def test_alerts_brackets(self):
+ "Test alerts w/ brackets (1.1.15+)"
+ out = self._parse('alert alert2 /tmp/foo.sh to { /tmp/bar.log meta timeout=10s }')
+ self.assertEqual(out.get('id'), 'alert2')
+ self.assertEqual(['/tmp/foo.sh'],
+ out.xpath('/alert/@path'))
+ self.assertEqual(['/tmp/bar.log'],
+ out.xpath('/alert/recipient/@value'))
+ self.assertEqual(['10s'],
+ out.xpath('/alert/recipient/meta_attributes/nvpair[@name="timeout"]/@value'))
+
+ def test_alerts_selectors(self):
+ "Test alerts w/ selectors (1.1.17+)"
+ out = self._parse('alert alert3 /tmp/foo.sh select nodes fencing attributes { standby shutdown } to { /tmp/bar.log meta timeout=10s }')
+ self.assertEqual(out.get('id'), 'alert3')
+ self.assertEqual(1, len(out.xpath('/alert/select/select_nodes')))
+ self.assertEqual(1, len(out.xpath('/alert/select/select_fencing')))
+ self.assertEqual(['standby', 'shutdown'],
+ out.xpath('/alert/select/select_attributes/attribute/@name'))
+
+
+ def _parse_lines(self, lines):
+ out = []
+ for line in lines2cli(lines):
+ if line is not None:
+ tmp = self._parse(line.strip())
+ self.assertNotEqual(tmp, False)
+ if tmp is not None:
+ out.append(tmp)
+ return out
+
+ def test_comments(self):
+ outp = self._parse_lines('''
+ # comment
+ node n1
+ ''')
+ self.assertNotEqual(-1, xml_tostring(outp[0]).find('# comment'))
+
+ def test_uppercase(self):
+ outp = self._parse_lines('''
+ PRIMITIVE rsc_dummy ocf:heartbeat:Dummy
+ MONITOR rsc_dummy 30
+ ''')
+ #print outp
+ self.assertEqual('primitive', outp[0].tag)
+ self.assertEqual('op', outp[1].tag)
+
+ outp = self._parse_lines('''
+ PRIMITIVE testfs ocf:heartbeat:Filesystem \
+ PARAMS directory="/mnt" fstype="ocfs2" device="/dev/sda1"
+ CLONE testfs-clone testfs \
+ META ordered="true" interleave="true"
+ ''')
+ #print outp
+ self.assertEqual('primitive', outp[0].tag)
+ self.assertEqual('clone', outp[1].tag)
+
+ out = self._parse('LOCATION loc-1 resource INF: foo')
+ self.assertEqual(out.get('id'), 'loc-1')
+ self.assertEqual(out.get('rsc'), 'resource')
+ self.assertEqual(out.get('score'), 'INFINITY')
+ self.assertEqual(out.get('node'), 'foo')
+
+ out = self._parse('NODE node-1 ATTRIBUTES foo=bar UTILIZATION wiz=bang')
+ self.assertEqual('node-1', out.get('uname'))
+ self.assertEqual(['bar'], out.xpath('/node/instance_attributes/nvpair[@name="foo"]/@value'))
+ self.assertEqual(['bang'], out.xpath('/node/utilization/nvpair[@name="wiz"]/@value'))
+
+ out = self._parse('PRIMITIVE virtual-ip ocf:heartbeat:IPaddr2 PARAMS ip=192.168.122.13 lvs_support=false OP start timeout=20 interval=0 OP stop timeout=20 interval=0 OP monitor interval=10 timeout=20')
+ self.assertEqual(['192.168.122.13'], out.xpath('//instance_attributes/nvpair[@name="ip"]/@value'))
+
+ out = self._parse('GROUP web-server virtual-ip apache META target-role=Started')
+ self.assertEqual(out.get('id'), 'web-server')
+
+ def test_nvpair_novalue(self):
+ inp = """primitive stonith_ipmi-karl stonith:fence_ipmilan \
+ params pcmk_host_list=karl verbose action=reboot \
+ ipaddr=10.43.242.221 login=root passwd=dummy method=onoff \
+ op start interval=0 timeout=60 \
+ op stop interval=0 timeout=60 \
+ op monitor interval=600 timeout=60 \
+ meta target-role=Started"""
+
+ outp = self._parse_lines(inp)
+ self.assertEqual(len(outp), 1)
+ self.assertEqual('primitive', outp[0].tag)
+        # print(xml_tostring(outp[0]))
+ verbose = outp[0].xpath('//nvpair[@name="verbose"]')
+ self.assertEqual(len(verbose), 1)
+ self.assertTrue('value' not in verbose[0].attrib)
+
+ @mock.patch('logging.Logger.error')
+ def test_configs(self, mock_error):
+ outp = self._parse_lines('''
+ primitive rsc_dummy ocf:heartbeat:Dummy
+ monitor rsc_dummy 30
+ ''')
+        # print(outp)
+ self.assertEqual(2, len(outp))
+
+ outp = self._parse_lines('''
+ primitive testfs ocf:heartbeat:Filesystem \
+ params directory="/mnt" fstype="ocfs2" device="/dev/sda1"
+ clone testfs-clone testfs \
+ meta ordered="true" interleave="true"
+ ''')
+        # print(outp)
+ self.assertEqual(2, len(outp))
+
+ inp = [
+ """node node1 attributes mem=16G""",
+ """node node2 utilization cpu=4""",
+ """primitive st stonith:ssh \
+ params hostlist='node1 node2' \
+ meta target-role="Started" requires="nothing" \
+ op start timeout=60s \
+ op monitor interval=60m timeout=60s""",
+ """primitive st2 stonith:ssh \
+ params hostlist='node1 node2'""",
+ """primitive d1 ocf:pacemaker:Dummy \
+ operations $id=d1-ops \
+ op monitor interval=60m \
+ op monitor interval=120m OCF_CHECK_LEVEL=10""",
+ """monitor d1 60s:30s""",
+ """primitive d2 ocf:heartbeat:Delay \
+ params mondelay=60 \
+ op start timeout=60s \
+ op stop timeout=60s""",
+ """monitor d2:Started 60s:30s""",
+ """group g1 d1 d2""",
+ """primitive d3 ocf:pacemaker:Dummy""",
+ """clone c d3 \
+ meta clone-max=1""",
+ """primitive d4 ocf:pacemaker:Dummy""",
+ """ms m d4""",
+ """primitive s5 ocf:pacemaker:Stateful \
+ operations $id-ref=d1-ops""",
+ """primitive s6 ocf:pacemaker:Stateful \
+ operations $id-ref=d1""",
+ """ms m5 s5""",
+ """ms m6 s6""",
+ """location l1 g1 100: node1""",
+ """location l2 c \
+ rule $id=l2-rule1 100: #uname eq node1""",
+ """location l3 m5 \
+ rule inf: #uname eq node1 and pingd gt 0""",
+ """location l4 m5 \
+ rule -inf: not_defined pingd or pingd lte 0""",
+ """location l5 m5 \
+ rule -inf: not_defined pingd or pingd lte 0 \
+ rule inf: #uname eq node1 and pingd gt 0 \
+ rule inf: date lt "2009-05-26" and \
+ date in start="2009-05-26" end="2009-07-26" and \
+ date in start="2009-05-26" years="2009" and \
+ date date_spec years="2009" hours=09-17""",
+ """location l6 m5 \
+ rule $id-ref=l2-rule1""",
+ """location l7 m5 \
+ rule $id-ref=l2""",
+ """collocation c1 inf: m6 m5""",
+ """collocation c2 inf: m5:Master d1:Started""",
+ """order o1 Mandatory: m5 m6""",
+ """order o2 Optional: d1:start m5:promote""",
+ """order o3 Serialize: m5 m6""",
+ """order o4 inf: m5 m6""",
+ """rsc_ticket ticket-A_m6 ticket-A: m6""",
+ """rsc_ticket ticket-B_m6_m5 ticket-B: m6 m5 loss-policy=fence""",
+ """rsc_ticket ticket-C_master ticket-C: m6 m5:Master loss-policy=fence""",
+ """fencing_topology st st2""",
+ """property stonith-enabled=true""",
+ """property $id=cpset2 maintenance-mode=true""",
+ """rsc_defaults failure-timeout=10m""",
+ """op_defaults $id=opsdef2 record-pending=true"""]
+
+ outp = self._parse_lines('\n'.join(inp))
+ a = [xml_tostring(x) for x in outp]
+ b = [
+ '<node uname="node1"><instance_attributes><nvpair name="mem" value="16G"/></instance_attributes></node>',
+ '<node uname="node2"><utilization><nvpair name="cpu" value="4"/></utilization></node>',
+ '<primitive id="st" class="stonith" type="ssh"><instance_attributes><nvpair name="hostlist" value="node1 node2"/></instance_attributes><meta_attributes><nvpair name="target-role" value="Started"/><nvpair name="requires" value="nothing"/></meta_attributes><operations><op name="start" timeout="60s" interval="0s"/><op name="monitor" interval="60m" timeout="60s"/></operations></primitive>',
+ '<primitive id="st2" class="stonith" type="ssh"><instance_attributes><nvpair name="hostlist" value="node1 node2"/></instance_attributes></primitive>',
+ '<primitive id="d1" class="ocf" provider="pacemaker" type="Dummy"><operations id="d1-ops"><op name="monitor" interval="60m"/><op name="monitor" interval="120m"><instance_attributes><nvpair name="OCF_CHECK_LEVEL" value="10"/></instance_attributes></op></operations></primitive>',
+ '<op name="monitor" rsc="d1" interval="60s" timeout="30s"/>',
+ '<primitive id="d2" class="ocf" provider="heartbeat" type="Delay"><instance_attributes><nvpair name="mondelay" value="60"/></instance_attributes><operations><op name="start" timeout="60s" interval="0s"/><op name="stop" timeout="60s" interval="0s"/></operations></primitive>',
+ '<op name="monitor" role="Started" rsc="d2" interval="60s" timeout="30s"/>',
+ '<group id="g1"><crmsh-ref id="d1"/><crmsh-ref id="d2"/></group>',
+ '<primitive id="d3" class="ocf" provider="pacemaker" type="Dummy"/>',
+ '<clone id="c"><meta_attributes><nvpair name="clone-max" value="1"/></meta_attributes><crmsh-ref id="d3"/></clone>',
+ '<primitive id="d4" class="ocf" provider="pacemaker" type="Dummy"/>',
+ '<master id="m"><crmsh-ref id="d4"/></master>',
+ '<primitive id="s5" class="ocf" provider="pacemaker" type="Stateful"><operations id-ref="d1-ops"/></primitive>',
+ '<primitive id="s6" class="ocf" provider="pacemaker" type="Stateful"><operations id-ref="d1"/></primitive>',
+ '<master id="m5"><crmsh-ref id="s5"/></master>',
+ '<master id="m6"><crmsh-ref id="s6"/></master>',
+ '<rsc_location id="l1" rsc="g1" score="100" node="node1"/>',
+ '<rsc_location id="l2" rsc="c"><rule id="l2-rule1" score="100"><expression operation="eq" attribute="#uname" value="node1"/></rule></rsc_location>',
+ '<rsc_location id="l3" rsc="m5"><rule score="INFINITY"><expression operation="eq" attribute="#uname" value="node1"/><expression operation="gt" attribute="pingd" value="0"/></rule></rsc_location>',
+ '<rsc_location id="l4" rsc="m5"><rule score="-INFINITY" boolean-op="or"><expression operation="not_defined" attribute="pingd"/><expression operation="lte" attribute="pingd" value="0"/></rule></rsc_location>',
+ '<rsc_location id="l5" rsc="m5"><rule score="-INFINITY" boolean-op="or"><expression operation="not_defined" attribute="pingd"/><expression operation="lte" attribute="pingd" value="0"/></rule><rule score="INFINITY"><expression operation="eq" attribute="#uname" value="node1"/><expression operation="gt" attribute="pingd" value="0"/></rule><rule score="INFINITY"><date_expression operation="lt" end="2009-05-26"/><date_expression operation="in_range" start="2009-05-26" end="2009-07-26"/><date_expression operation="in_range" start="2009-05-26"><duration years="2009"/></date_expression><date_expression operation="date_spec"><date_spec years="2009" hours="09-17"/></date_expression></rule></rsc_location>',
+ '<rsc_location id="l6" rsc="m5"><rule id-ref="l2-rule1"/></rsc_location>',
+ '<rsc_location id="l7" rsc="m5"><rule id-ref="l2"/></rsc_location>',
+ '<rsc_colocation id="c1" score="INFINITY" rsc="m6" with-rsc="m5"/>',
+ '<rsc_colocation id="c2" score="INFINITY" rsc="m5" rsc-role="Master" with-rsc="d1" with-rsc-role="Started"/>',
+ '<rsc_order id="o1" kind="Mandatory" first="m5" then="m6"/>',
+ '<rsc_order id="o2" kind="Optional" first="d1" first-action="start" then="m5" then-action="promote"/>',
+ '<rsc_order id="o3" kind="Serialize" first="m5" then="m6"/>',
+ '<rsc_order id="o4" kind="Mandatory" first="m5" then="m6"/>',
+ '<rsc_ticket id="ticket-A_m6" ticket="ticket-A" rsc="m6"/>',
+ '<rsc_ticket id="ticket-B_m6_m5" ticket="ticket-B" loss-policy="fence"><resource_set><resource_ref id="m6"/><resource_ref id="m5"/></resource_set></rsc_ticket>',
+ '<rsc_ticket id="ticket-C_master" ticket="ticket-C" loss-policy="fence"><resource_set><resource_ref id="m6"/></resource_set><resource_set role="Master"><resource_ref id="m5"/></resource_set></rsc_ticket>',
+ '<fencing-topology><fencing-level target="ha-one" index="1" devices="st"/><fencing-level target="ha-one" index="2" devices="st2"/><fencing-level target="ha-three" index="1" devices="st"/><fencing-level target="ha-three" index="2" devices="st2"/><fencing-level target="ha-two" index="1" devices="st"/><fencing-level target="ha-two" index="2" devices="st2"/></fencing-topology>',
+ '<cluster_property_set><nvpair name="stonith-enabled" value="true"/></cluster_property_set>',
+ '<cluster_property_set id="cpset2"><nvpair name="maintenance-mode" value="true"/></cluster_property_set>',
+ '<rsc_defaults><meta_attributes><nvpair name="failure-timeout" value="10m"/></meta_attributes></rsc_defaults>',
+ '<op_defaults><meta_attributes id="opsdef2"><nvpair name="record-pending" value="true"/></meta_attributes></op_defaults>',
+ ]
+
+        self.maxDiff = None
+        self.assertEqual(len(b), len(a))
+        for result, expected in zip(a, b):
+            self.assertEqual(expected, result)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unittests/test_prun.py b/test/unittests/test_prun.py
new file mode 100644
index 0000000..7e987bf
--- /dev/null
+++ b/test/unittests/test_prun.py
@@ -0,0 +1,157 @@
+import crmsh.constants
+import crmsh.prun.prun
+import crmsh.prun.runner
+
+import unittest
+from unittest import mock
+
+
+class TestPrun(unittest.TestCase):
+ @mock.patch("os.geteuid")
+ @mock.patch("crmsh.userdir.getuser")
+ @mock.patch("crmsh.prun.prun._is_local_host")
+ @mock.patch("crmsh.user_of_host.UserOfHost.user_pair_for_ssh")
+ @mock.patch("crmsh.prun.runner.Runner.run")
+ @mock.patch("crmsh.prun.runner.Runner.add_task")
+ def test_prun(
+ self,
+ mock_runner_add_task: mock.MagicMock,
+ mock_runner_run: mock.MagicMock,
+ mock_user_pair_for_ssh: mock.MagicMock,
+ mock_is_local_host: mock.MagicMock,
+ mock_getuser: mock.MagicMock,
+ mock_geteuid: mock.MagicMock,
+ ):
+ host_cmdline = {"host1": "foo", "host2": "bar"}
+ mock_user_pair_for_ssh.return_value = "alice", "bob"
+ mock_is_local_host.return_value = False
+ mock_getuser.return_value = 'root'
+ mock_geteuid.return_value = 0
+ results = crmsh.prun.prun.prun(host_cmdline)
+ mock_user_pair_for_ssh.assert_has_calls([
+ mock.call("host1"),
+ mock.call("host2"),
+ ])
+ mock_is_local_host.assert_has_calls([
+ mock.call("host1"),
+ mock.call("host2"),
+ ])
+ mock_runner_add_task.assert_has_calls([
+ mock.call(TaskArgumentsEq(
+ ['su', 'alice', '--login', '-c', 'ssh {} bob@host1 sudo -H /bin/sh'.format(crmsh.constants.SSH_OPTION)],
+ b'foo',
+ stdout=crmsh.prun.runner.Task.Capture,
+ stderr=crmsh.prun.runner.Task.Capture,
+ context={"host": 'host1', "ssh_user": 'bob'},
+ )),
+ mock.call(TaskArgumentsEq(
+ ['su', 'alice', '--login', '-c', 'ssh {} bob@host2 sudo -H /bin/sh'.format(crmsh.constants.SSH_OPTION)],
+ b'bar',
+ stdout=crmsh.prun.runner.Task.Capture,
+ stderr=crmsh.prun.runner.Task.Capture,
+ context={"host": 'host2', "ssh_user": 'bob'},
+ )),
+ ])
+ mock_runner_run.assert_called_once()
+        self.assertIsInstance(results, dict)
+ self.assertSetEqual({"host1", "host2"}, set(results.keys()))
+
+ @mock.patch("os.geteuid")
+ @mock.patch("crmsh.userdir.getuser")
+ @mock.patch("crmsh.prun.prun._is_local_host")
+ @mock.patch("crmsh.user_of_host.UserOfHost.user_pair_for_ssh")
+ @mock.patch("crmsh.prun.runner.Runner.run")
+ @mock.patch("crmsh.prun.runner.Runner.add_task")
+ def test_prun_root(
+ self,
+ mock_runner_add_task: mock.MagicMock,
+ mock_runner_run: mock.MagicMock,
+ mock_user_pair_for_ssh: mock.MagicMock,
+ mock_is_local_host: mock.MagicMock,
+ mock_getuser: mock.MagicMock,
+ mock_geteuid: mock.MagicMock,
+ ):
+ host_cmdline = {"host1": "foo", "host2": "bar"}
+ mock_user_pair_for_ssh.return_value = "root", "root"
+ mock_is_local_host.return_value = False
+ mock_getuser.return_value = 'root'
+ mock_geteuid.return_value = 0
+ results = crmsh.prun.prun.prun(host_cmdline)
+ mock_geteuid.assert_not_called()
+ mock_user_pair_for_ssh.assert_has_calls([
+ mock.call("host1"),
+ mock.call("host2"),
+ ])
+ mock_is_local_host.assert_has_calls([
+ mock.call("host1"),
+ mock.call("host2"),
+ ])
+ mock_runner_add_task.assert_has_calls([
+ mock.call(TaskArgumentsEq(
+ ['/bin/sh', '-c', 'ssh {} root@host1 sudo -H /bin/sh'.format(crmsh.constants.SSH_OPTION)],
+ b'foo',
+ stdout=crmsh.prun.runner.Task.Capture,
+ stderr=crmsh.prun.runner.Task.Capture,
+ context={"host": 'host1', "ssh_user": 'root'},
+ )),
+ mock.call(TaskArgumentsEq(
+ ['/bin/sh', '-c', 'ssh {} root@host2 sudo -H /bin/sh'.format(crmsh.constants.SSH_OPTION)],
+ b'bar',
+ stdout=crmsh.prun.runner.Task.Capture,
+ stderr=crmsh.prun.runner.Task.Capture,
+ context={"host": 'host2', "ssh_user": 'root'},
+ )),
+ ])
+ mock_runner_run.assert_called_once()
+        self.assertIsInstance(results, dict)
+ self.assertSetEqual({"host1", "host2"}, set(results.keys()))
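+
+    # Taken together, test_prun and test_prun_root pin down the generated command
+    # shape: a non-root local/remote user pair wraps ssh in
+    # `su <local_user> --login -c ...`, while root runs it via plain `/bin/sh -c`.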
+
+ @mock.patch("os.geteuid")
+ @mock.patch("crmsh.userdir.getuser")
+ @mock.patch("crmsh.prun.prun._is_local_host")
+ @mock.patch("crmsh.user_of_host.UserOfHost.user_pair_for_ssh")
+ @mock.patch("crmsh.prun.runner.Runner.run")
+ @mock.patch("crmsh.prun.runner.Runner.add_task")
+ def test_prun_localhost(
+ self,
+ mock_runner_add_task: mock.MagicMock,
+ mock_runner_run: mock.MagicMock,
+ mock_user_pair_for_ssh: mock.MagicMock,
+ mock_is_local_host: mock.MagicMock,
+ mock_getuser: mock.MagicMock,
+ mock_geteuid: mock.MagicMock,
+ ):
+ host_cmdline = {"host1": "foo"}
+        # user_pair_for_ssh must not be consulted for a local host; asserted below.
+ mock_is_local_host.return_value = True
+ mock_getuser.return_value = 'root'
+ mock_geteuid.return_value = 0
+ results = crmsh.prun.prun.prun(host_cmdline)
+ mock_user_pair_for_ssh.assert_not_called()
+ mock_is_local_host.assert_called_once_with('host1')
+ mock_runner_add_task.assert_called_once_with(
+ TaskArgumentsEq(
+ ['/bin/sh'],
+ b'foo',
+ stdout=crmsh.prun.runner.Task.Capture,
+ stderr=crmsh.prun.runner.Task.Capture,
+ context={"host": 'host1', "ssh_user": 'root'},
+ )
+ )
+ mock_runner_run.assert_called_once()
+        self.assertIsInstance(results, dict)
+ self.assertSetEqual({"host1"}, set(results.keys()))
+
+
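+# Equality matcher: unittest.mock compares recorded call arguments with "==",
+# so a Task subclass overriding __eq__ lets assert_called_once_with and
+# assert_has_calls verify just the fields of interest, without requiring
+# object identity.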
+class TaskArgumentsEq(crmsh.prun.runner.Task):
+ def __eq__(self, other):
+ if not isinstance(other, crmsh.prun.runner.Task):
+ return False
+ return self.args == other.args \
+ and self.input == other.input \
+ and self.stdout_config == other.stdout_config \
+ and self.stderr_config == other.stderr_config \
+ and self.context == other.context
diff --git a/test/unittests/test_qdevice.py b/test/unittests/test_qdevice.py
new file mode 100644
index 0000000..f6b2f13
--- /dev/null
+++ b/test/unittests/test_qdevice.py
@@ -0,0 +1,1031 @@
+import os
+import unittest
+import socket
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import bootstrap
+from crmsh import sbd
+from crmsh import qdevice, lock
+
+
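+# Raw corosync.conf fixtures shipped next to the tests, read once at module
+# import (presumably consumed by tests further down this file).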
+with open(os.path.join(os.path.dirname(__file__), 'corosync.conf.2')) as f:
+    F2 = f.read()
+with open(os.path.join(os.path.dirname(__file__), 'corosync.conf.3')) as f:
+    F4 = f.read()
+
+
+@mock.patch('crmsh.utils.calculate_quorate_status')
+@mock.patch('crmsh.utils.get_quorum_votes_dict')
+def test_evaluate_qdevice_quorum_effect_restart(mock_get_dict, mock_quorate):
+ mock_get_dict.return_value = {'Expected': '1', 'Total': '1'}
+ mock_quorate.return_value = False
+ res = qdevice.evaluate_qdevice_quorum_effect(qdevice.QDEVICE_ADD, False, False)
+ assert res == qdevice.QdevicePolicy.QDEVICE_RESTART
+ mock_get_dict.assert_called_once_with()
+ mock_quorate.assert_called_once_with(2, 1)
+
+
+@mock.patch('crmsh.utils.calculate_quorate_status')
+@mock.patch('crmsh.utils.get_quorum_votes_dict')
+def test_evaluate_qdevice_quorum_effect_reload(mock_get_dict, mock_quorate):
+ mock_get_dict.return_value = {'Expected': '2', 'Total': '2'}
+ mock_quorate.return_value = True
+ res = qdevice.evaluate_qdevice_quorum_effect(qdevice.QDEVICE_ADD)
+ assert res == qdevice.QdevicePolicy.QDEVICE_RELOAD
+ mock_get_dict.assert_called_once_with()
+ mock_quorate.assert_called_once_with(3, 2)
+
+
+@mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+@mock.patch('crmsh.utils.calculate_quorate_status')
+@mock.patch('crmsh.utils.get_quorum_votes_dict')
+def test_evaluate_qdevice_quorum_effect_later(mock_get_dict, mock_quorate, mock_parser):
+ mock_get_dict.return_value = {'Expected': '2', 'Total': '2'}
+ mock_quorate.return_value = False
+ mock_parser().is_any_resource_running.return_value = True
+ res = qdevice.evaluate_qdevice_quorum_effect(qdevice.QDEVICE_REMOVE)
+ assert res == qdevice.QdevicePolicy.QDEVICE_RESTART_LATER
+ mock_get_dict.assert_called_once_with()
+ mock_quorate.assert_called_once_with(2, 1)
+
+
+@mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+@mock.patch('crmsh.utils.calculate_quorate_status')
+@mock.patch('crmsh.utils.get_quorum_votes_dict')
+def test_evaluate_qdevice_quorum_effect(mock_get_dict, mock_quorate, mock_parser):
+ mock_get_dict.return_value = {'Expected': '2', 'Total': '2'}
+ mock_quorate.return_value = False
+ mock_parser().is_any_resource_running.return_value = False
+ res = qdevice.evaluate_qdevice_quorum_effect(qdevice.QDEVICE_REMOVE)
+ assert res == qdevice.QdevicePolicy.QDEVICE_RESTART
+ mock_get_dict.assert_called_once_with()
+ mock_quorate.assert_called_once_with(2, 1)
+
+
+@mock.patch('crmsh.lock.RemoteLock')
+def test_qnetd_lock_for_same_cluster_name(mock_remote_lock):
+ _context = mock.Mock(qnetd_addr="qnetd-node", cluster_name="cluster1")
+ remote_lock_inst = mock.Mock()
+ mock_remote_lock.return_value = remote_lock_inst
+ remote_lock_inst.lock.return_value.__enter__ = mock.Mock()
+ remote_lock_inst.lock.return_value.__exit__ = mock.Mock()
+ @qdevice.qnetd_lock_for_same_cluster_name
+ def decorated(ctx):
+ return
+ decorated(_context)
+ mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False,
+ lock_dir="/run/.crmsh_qdevice_lock_for_cluster1", wait=False)
+
+
+@mock.patch('crmsh.utils.fatal')
+@mock.patch('crmsh.lock.RemoteLock')
+def test_qnetd_lock_for_same_cluster_name_claim_error(mock_remote_lock, mock_fatal):
+ _context = mock.Mock(qnetd_addr="qnetd-node", cluster_name="cluster1")
+ remote_lock_inst = mock.Mock()
+ mock_remote_lock.return_value = remote_lock_inst
+ remote_lock_inst.lock.side_effect = lock.ClaimLockError
+ @qdevice.qnetd_lock_for_same_cluster_name
+ def decorated(ctx):
+ return
+ decorated(_context)
+ mock_fatal.assert_called_once_with("Duplicated cluster name \"cluster1\"!")
+ mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False,
+ lock_dir="/run/.crmsh_qdevice_lock_for_cluster1", wait=False)
+
+
+@mock.patch('crmsh.utils.fatal')
+@mock.patch('crmsh.lock.RemoteLock')
+def test_qnetd_lock_for_same_cluster_name_ssh_error(mock_remote_lock, mock_fatal):
+ _context = mock.Mock(qnetd_addr="qnetd-node", cluster_name="cluster1")
+ remote_lock_inst = mock.Mock()
+ mock_remote_lock.return_value = remote_lock_inst
+ remote_lock_inst.lock.side_effect = lock.SSHError("ssh error!")
+ @qdevice.qnetd_lock_for_same_cluster_name
+ def decorated(ctx):
+ return
+ decorated(_context)
+ mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False,
+ lock_dir="/run/.crmsh_qdevice_lock_for_cluster1", wait=False)
+
+
+@mock.patch('crmsh.lock.RemoteLock')
+def test_qnetd_lock_for_multi_cluster(mock_remote_lock):
+ _context = mock.Mock(qnetd_addr="qnetd-node")
+ remote_lock_inst = mock.Mock()
+ mock_remote_lock.return_value = remote_lock_inst
+ remote_lock_inst.lock.return_value.__enter__ = mock.Mock()
+ remote_lock_inst.lock.return_value.__exit__ = mock.Mock()
+ @qdevice.qnetd_lock_for_multi_cluster
+ def decorated(ctx):
+ return
+ decorated(_context)
+ mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False, no_warn=True)
+
+
+@mock.patch('crmsh.utils.fatal')
+@mock.patch('crmsh.lock.RemoteLock')
+def test_qnetd_lock_for_multi_cluster_error(mock_remote_lock, mock_fatal):
+ _context = mock.Mock(qnetd_addr="qnetd-node")
+ remote_lock_inst = mock.Mock()
+ mock_remote_lock.return_value = remote_lock_inst
+ remote_lock_inst.lock.side_effect = lock.SSHError("ssh error!")
+ @qdevice.qnetd_lock_for_multi_cluster
+ def decorated(ctx):
+ return
+ decorated(_context)
+ mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False, no_warn=True)
+
+
+class TestQDevice(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ # Use the setup to create a fresh instance for each test
+ self.qdevice_with_ip = qdevice.QDevice("10.10.10.123")
+ self.qdevice_with_hostname = qdevice.QDevice("node.qnetd")
+ self.qdevice_with_invalid_port = qdevice.QDevice("10.10.10.123", port=100)
+ self.qdevice_with_invalid_tie_breaker = qdevice.QDevice("10.10.10.123", tie_breaker="wrong")
+ self.qdevice_with_ip_cluster_node = qdevice.QDevice("10.10.10.123", cluster_node="node1.com")
+ self.qdevice_with_invalid_cmds_relative_path = qdevice.QDevice("10.10.10.123", cmds="ls")
+ self.qdevice_with_invalid_cmds_not_exist = qdevice.QDevice("10.10.10.123", cmds="/not_exist")
+ self.qdevice_with_cluster_name = qdevice.QDevice("10.10.10.123", cluster_name="hacluster1")
+ self.qdevice_with_stage_cluster_name = qdevice.QDevice("10.10.10.123", is_stage=True, cluster_name="cluster1")
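+        # These instances cover both valid setups and deliberately invalid ones
+        # (bad port, tie_breaker and heuristics commands) used by the
+        # validation tests below.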
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_qnetd_cacert_on_local(self):
+ res = self.qdevice_with_ip.qnetd_cacert_on_local
+ self.assertEqual(res, "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt")
+
+ def test_qnetd_cacert_on_cluster(self):
+ res = self.qdevice_with_ip_cluster_node.qnetd_cacert_on_cluster
+ self.assertEqual(res, "/etc/corosync/qdevice/net/node1.com/qnetd-cacert.crt")
+
+ def test_qdevice_crq_on_qnetd(self):
+ res = self.qdevice_with_cluster_name.qdevice_crq_on_qnetd
+ self.assertEqual(res, "/etc/corosync/qnetd/nssdb/qdevice-net-node.crq.hacluster1")
+
+ def test_qdevice_crq_on_local(self):
+ res = self.qdevice_with_ip.qdevice_crq_on_local
+ self.assertEqual(res, "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.crq")
+
+ def test_qnetd_cluster_crt_on_qnetd(self):
+ res = self.qdevice_with_ip.qnetd_cluster_crt_on_qnetd
+ self.assertEqual(res, "/etc/corosync/qnetd/nssdb/cluster-None.crt")
+
+ @mock.patch('os.path.basename')
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cluster_crt_on_qnetd", new_callable=mock.PropertyMock)
+ def test_qnetd_cluster_crt_on_local(self, mock_qnetd_crt, mock_basename):
+ mock_qnetd_crt.return_value = "crt_file"
+ mock_basename.return_value = "crt_file"
+ res = self.qdevice_with_ip.qnetd_cluster_crt_on_local
+ self.assertEqual(res, "/etc/corosync/qdevice/net/10.10.10.123/crt_file")
+
+ def test_qdevice_p12_on_local(self):
+ res = self.qdevice_with_ip.qdevice_p12_on_local
+ self.assertEqual(res, "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12")
+
+ def test_qdevice_p12_on_cluster(self):
+ res = self.qdevice_with_ip_cluster_node.qdevice_p12_on_cluster
+ self.assertEqual(res, "/etc/corosync/qdevice/net/node1.com/qdevice-net-node.p12")
+
+ @mock.patch('crmsh.utils.check_port_open')
+ @mock.patch('crmsh.utils.InterfacesInfo.ip_in_local')
+ @mock.patch('crmsh.utils.ping_node')
+ @mock.patch('socket.getaddrinfo')
+ def test_check_qnetd_addr_port_error(self, mock_getaddrinfo, mock_ping, mock_in_local, mock_check):
+ mock_getaddrinfo.return_value = [(None, ("10.10.10.123",)),]
+ mock_in_local.return_value = False
+ mock_check.return_value = False
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qnetd_addr("qnetd-node")
+        expected_err_string = "ssh service on \"qnetd-node\" not available"
+        self.assertEqual(expected_err_string, str(err.exception))
+
+ @mock.patch('crmsh.utils.InterfacesInfo.ip_in_local')
+ @mock.patch('crmsh.utils.ping_node')
+ @mock.patch('socket.getaddrinfo')
+ def test_check_qnetd_addr_local(self, mock_getaddrinfo, mock_ping, mock_in_local):
+ mock_getaddrinfo.return_value = [(None, ("10.10.10.123",)),]
+ mock_in_local.return_value = True
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qnetd_addr("qnetd-node")
+        expected_err_string = "host for qnetd must be a remote one"
+        self.assertEqual(expected_err_string, str(err.exception))
+
+ @mock.patch('socket.getaddrinfo')
+ def test_check_qnetd_addr(self, mock_getaddrinfo):
+ mock_getaddrinfo.side_effect = socket.error
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qnetd_addr("qnetd-node")
+        expected_err_string = "host \"qnetd-node\" is unreachable"
+        self.assertEqual(expected_err_string, str(err.exception))
+
+ @mock.patch('crmsh.utils.valid_port')
+ def test_check_qdevice_port(self, mock_port):
+ mock_port.return_value = False
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_port("1")
+        expected_err_string = "invalid qdevice port range(1024 - 65535)"
+        self.assertEqual(expected_err_string, str(err.exception))
+
+ def test_check_qdevice_algo(self):
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_algo("1")
+        expected_err_string = "invalid ALGORITHM choice: '1' (choose from 'ffsplit', 'lms')"
+        self.assertEqual(expected_err_string, str(err.exception))
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_check_qdevice_tie_breaker(self, mock_is_active):
+ mock_is_active.return_value = False
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_tie_breaker("1")
+        expected_err_string = "invalid qdevice tie_breaker(lowest/highest/valid_node_id)"
+        self.assertEqual(expected_err_string, str(err.exception))
+
+ def test_check_qdevice_tls(self):
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_tls("1")
+        expected_err_string = "invalid TLS choice: '1' (choose from 'on', 'off', 'required')"
+        self.assertEqual(expected_err_string, str(err.exception))
+
+ def test_check_qdevice_hm(self):
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_heuristics_mode("1")
+        expected_err_string = "invalid MODE choice: '1' (choose from 'on', 'sync', 'off')"
+        self.assertEqual(expected_err_string, str(err.exception))
+
+ def test_check_qdevice_he_path_error(self):
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_heuristics("command1")
+        expected_err_string = "commands for heuristics should be absolute path"
+        self.assertEqual(expected_err_string, str(err.exception))
+
+ @mock.patch('os.path.exists')
+    def test_check_qdevice_he_not_exist_error(self, mock_exists):
+ mock_exists.return_value = False
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_qdevice_heuristics("/usr/bin/testst")
+        expected_err_string = "command /usr/bin/testst not exist"
+        self.assertEqual(expected_err_string, str(err.exception))
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_check_package_installed(self, mock_installed):
+ mock_installed.return_value = False
+ with self.assertRaises(ValueError) as err:
+ qdevice.QDevice.check_package_installed("corosync-qdevice")
+        expected_err_string = "Package \"corosync-qdevice\" not installed on this node"
+        self.assertEqual(expected_err_string, str(err.exception))
+
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_heuristics_mode')
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_heuristics')
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_tls')
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_tie_breaker')
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_algo')
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_port')
+ @mock.patch('crmsh.qdevice.QDevice.check_qnetd_addr')
+ @mock.patch('crmsh.qdevice.QDevice.check_package_installed')
+ def test_valid_qdevice_options(self, mock_installed, mock_check_qnetd, mock_check_port,
+ mock_check_algo, mock_check_tie, mock_check_tls, mock_check_h, mock_check_hm):
+ self.qdevice_with_ip.valid_qdevice_options()
+ mock_installed.assert_called_once_with("corosync-qdevice")
+ mock_check_qnetd.assert_called_once_with("10.10.10.123")
+
+ @mock.patch("crmsh.utils.package_is_installed")
+ def test_valid_qnetd_not_installed(self, mock_installed):
+ self.qdevice_with_ip.qnetd_ip = "10.10.10.123"
+ mock_installed.return_value = False
+        expected_err_string = 'Package "corosync-qnetd" not installed on 10.10.10.123!\nCluster service already successfully started on this node except qdevice service.\nIf you still want to use qdevice, install "corosync-qnetd" on 10.10.10.123.\nThen run command "crm cluster init" with "qdevice" stage, like:\n  crm cluster init qdevice qdevice_related_options\nThat command will setup qdevice separately.'
+ self.maxDiff = None
+
+ with self.assertRaises(ValueError) as err:
+ self.qdevice_with_ip.valid_qnetd()
+        self.assertEqual(expected_err_string, str(err.exception))
+
+ mock_installed.assert_called_once_with("corosync-qnetd", remote_addr="10.10.10.123")
+
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ @mock.patch("crmsh.service_manager.ServiceManager.service_is_active")
+ @mock.patch("crmsh.utils.package_is_installed")
+ def test_valid_qnetd_duplicated_with_qnetd_running(self, mock_installed, mock_is_active, mock_run):
+ mock_installed.return_value = True
+ mock_is_active.return_value = True
+ mock_run.return_value = "data"
+        expected_err_string = "This cluster's name \"cluster1\" already exists on qnetd server!\nPlease consider to use the different cluster-name property."
+ self.maxDiff = None
+
+ with self.assertRaises(ValueError) as err:
+ self.qdevice_with_stage_cluster_name.valid_qnetd()
+        self.assertEqual(expected_err_string, str(err.exception))
+
+ mock_installed.assert_called_once_with("corosync-qnetd", remote_addr="10.10.10.123")
+ mock_is_active.assert_called_once_with("corosync-qnetd", remote_addr="10.10.10.123")
+ mock_run.assert_called_once_with("corosync-qnetd-tool -l -c cluster1", "10.10.10.123")
+
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ @mock.patch("crmsh.service_manager.ServiceManager.service_is_active")
+ @mock.patch("crmsh.utils.package_is_installed")
+ def test_valid_qnetd_duplicated_without_qnetd_running(self, mock_installed, mock_is_active, mock_run):
+ mock_installed.return_value = True
+ mock_is_active.return_value = False
+        expected_err_string = "This cluster's name \"hacluster1\" already exists on qnetd server!\nCluster service already successfully started on this node except qdevice service.\nIf you still want to use qdevice, consider to use the different cluster-name property.\nThen run command \"crm cluster init\" with \"qdevice\" stage, like:\n  crm cluster init qdevice qdevice_related_options\nThat command will setup qdevice separately."
+ self.maxDiff = None
+
+ with self.assertRaises(ValueError) as err:
+ self.qdevice_with_cluster_name.valid_qnetd()
+        self.assertEqual(expected_err_string, str(err.exception))
+
+ mock_installed.assert_called_once_with("corosync-qnetd", remote_addr="10.10.10.123")
+ mock_is_active.assert_called_once_with("corosync-qnetd", remote_addr="10.10.10.123")
+ mock_run.assert_called_once_with("test -f /etc/corosync/qnetd/nssdb/cluster-hacluster1.crt", "10.10.10.123")
+
+ @mock.patch("crmsh.service_manager.ServiceManager.enable_service")
+ def test_enable_qnetd(self, mock_enable):
+ self.qdevice_with_ip.enable_qnetd()
+ mock_enable.assert_called_once_with("corosync-qnetd.service", remote_addr="10.10.10.123")
+
+ @mock.patch("crmsh.service_manager.ServiceManager.disable_service")
+ def test_disable_qnetd(self, mock_disable):
+ self.qdevice_with_ip.disable_qnetd()
+ mock_disable.assert_called_once_with("corosync-qnetd.service", remote_addr="10.10.10.123")
+
+ @mock.patch("crmsh.service_manager.ServiceManager.start_service")
+ def test_start_qnetd(self, mock_start):
+ self.qdevice_with_ip.start_qnetd()
+ mock_start.assert_called_once_with("corosync-qnetd.service", remote_addr="10.10.10.123")
+
+ @mock.patch("crmsh.service_manager.ServiceManager.stop_service")
+ def test_stop_qnetd(self, mock_stop):
+ self.qdevice_with_ip.stop_qnetd()
+ mock_stop.assert_called_once_with("corosync-qnetd.service", remote_addr="10.10.10.123")
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_qnetd", new_callable=mock.PropertyMock)
+ def test_init_db_on_qnetd_already_exists(self, mock_qnetd_cacert, mock_call, mock_log):
+ mock_call.return_value = [("10.10.10.123", (0, None, None))]
+ mock_qnetd_cacert.return_value = "/etc/corosync/qnetd/nssdb/qnetd-cacert.crt"
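+        # functools.wraps exposes the undecorated function as __wrapped__, so
+        # calling it directly exercises the method body while bypassing its
+        # decorator.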
+ self.qdevice_with_ip.init_db_on_qnetd.__wrapped__(self.qdevice_with_ip)
+ mock_call.assert_called_once_with(["10.10.10.123"],
+ "test -f {}".format(mock_qnetd_cacert.return_value))
+ mock_qnetd_cacert.assert_called_once_with()
+ mock_log.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_qnetd", new_callable=mock.PropertyMock)
+ def test_init_db_on_qnetd(self, mock_qnetd_cacert, mock_call, mock_log):
+ mock_call.side_effect = [ValueError(mock.Mock(), "Failed on 10.10.10.123: error happen"),
+ [("10.10.10.123", (0, None, None))]]
+ mock_qnetd_cacert.return_value = "/etc/corosync/qnetd/nssdb/qnetd-cacert.crt"
+
+ self.qdevice_with_ip.init_db_on_qnetd.__wrapped__(self.qdevice_with_ip)
+
+ mock_call.assert_has_calls([
+ mock.call(["10.10.10.123"], "test -f {}".format(mock_qnetd_cacert.return_value)),
+ mock.call(["10.10.10.123"], "corosync-qnetd-certutil -i")
+ ])
+ mock_qnetd_cacert.assert_called_once_with()
+ mock_log.assert_called_once_with("Step 1: Initialize database on 10.10.10.123",
+ 'corosync-qnetd-certutil -i')
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ def test_fetch_qnetd_crt_from_qnetd_exist(self, mock_qnetd_cacert_local,
+ mock_parallax_slurp, mock_exists, mock_log):
+ mock_qnetd_cacert_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt"
+ mock_exists.return_value = True
+
+ self.qdevice_with_ip.fetch_qnetd_crt_from_qnetd()
+
+ mock_exists.assert_called_once_with(mock_qnetd_cacert_local.return_value)
+ mock_qnetd_cacert_local.assert_called_once_with()
+ mock_parallax_slurp.assert_not_called()
+ mock_log.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ def test_fetch_qnetd_crt_from_qnetd(self, mock_qnetd_cacert_local,
+ mock_parallax_slurp, mock_exists, mock_log):
+ mock_qnetd_cacert_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt"
+ mock_exists.return_value = False
+
+ self.qdevice_with_ip.fetch_qnetd_crt_from_qnetd()
+
+ mock_exists.assert_called_once_with(mock_qnetd_cacert_local.return_value)
+ mock_qnetd_cacert_local.assert_called_once_with()
+ mock_log.assert_called_once_with("Step 2: Fetch qnetd-cacert.crt from 10.10.10.123")
+ mock_parallax_slurp.assert_called_once_with(["10.10.10.123"], "/etc/corosync/qdevice/net", "/etc/corosync/qnetd/nssdb/qnetd-cacert.crt")
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.utils.list_cluster_nodes")
+ @mock.patch("crmsh.utils.this_node")
+ @mock.patch("crmsh.parallax.parallax_copy")
+ def test_copy_qnetd_crt_to_cluster_one_node(self, mock_copy, mock_this_node, mock_list_nodes, mock_log):
+ mock_this_node.return_value = "node1.com"
+ mock_list_nodes.return_value = ["node1.com"]
+
+ self.qdevice_with_ip.copy_qnetd_crt_to_cluster()
+
+ mock_this_node.assert_called_once_with()
+ mock_list_nodes.assert_called_once_with()
+ mock_copy.assert_not_called()
+ mock_log.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.utils.list_cluster_nodes")
+ @mock.patch("crmsh.utils.this_node")
+ @mock.patch("crmsh.parallax.parallax_copy")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("os.path.dirname")
+ def test_copy_qnetd_crt_to_cluster(self, mock_dirname, mock_qnetd_cacert_local,
+ mock_copy, mock_this_node, mock_list_nodes, mock_log):
+ mock_qnetd_cacert_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt"
+ mock_dirname.return_value = "/etc/corosync/qdevice/net/10.10.10.123"
+ mock_this_node.return_value = "node1.com"
+ mock_list_nodes.return_value = ["node1.com", "node2.com"]
+
+ self.qdevice_with_ip.copy_qnetd_crt_to_cluster()
+
+ mock_this_node.assert_called_once_with()
+ mock_list_nodes.assert_called_once_with()
+ mock_log.assert_called_once_with("Step 3: Copy exported qnetd-cacert.crt to ['node2.com']")
+ mock_copy.assert_called_once_with(["node2.com"], mock_dirname.return_value,
+ "/etc/corosync/qdevice/net", True)
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.utils.list_cluster_nodes")
+ def test_init_db_on_cluster(self, mock_list_nodes, mock_qnetd_cacert_local, mock_call, mock_log):
+ mock_list_nodes.return_value = ["node1", "node2"]
+ mock_qnetd_cacert_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt"
+ mock_call.return_value = [("node1", (0, None, None)), ("node2", (0, None, None))]
+
+ self.qdevice_with_ip.init_db_on_cluster()
+
+ mock_list_nodes.assert_called_once_with()
+ mock_qnetd_cacert_local.assert_called_once_with()
+ mock_log.assert_called_once_with("Step 4: Initialize database on ['node1', 'node2']",
+ 'corosync-qdevice-net-certutil -i -c /etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt')
+ mock_call.assert_called_once_with(mock_list_nodes.return_value,
+ "corosync-qdevice-net-certutil -i -c {}".format(mock_qnetd_cacert_local.return_value))
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ def test_create_ca_request(self, mock_stdout_stderr, mock_log):
+ mock_stdout_stderr.return_value = (0, None, None)
+
+ self.qdevice_with_cluster_name.create_ca_request()
+
+ mock_log.assert_called_once_with("Step 5: Generate certificate request qdevice-net-node.crq",
+ 'corosync-qdevice-net-certutil -r -n hacluster1')
+ mock_stdout_stderr.assert_called_once_with("corosync-qdevice-net-certutil -r -n hacluster1")
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_crq_on_qnetd", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_crq_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_copy")
+ def test_copy_crq_to_qnetd(self, mock_copy, mock_qdevice_crq_local,
+ mock_qdevice_crq_qnetd, mock_log):
+ mock_qdevice_crq_local.return_value = "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.crq"
+ mock_qdevice_crq_qnetd.return_value = "/etc/corosync/qnetd/nssdb/qdevice-net-node.crq"
+
+ self.qdevice_with_ip.copy_crq_to_qnetd()
+
+ mock_log.assert_called_once_with("Step 6: Copy qdevice-net-node.crq to 10.10.10.123")
+ mock_copy.assert_called_once_with(["10.10.10.123"], mock_qdevice_crq_local.return_value,
+ mock_qdevice_crq_qnetd.return_value, False)
+ mock_qdevice_crq_local.assert_called_once_with()
+ mock_qdevice_crq_qnetd.assert_called_once_with()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_crq_on_qnetd", new_callable=mock.PropertyMock)
+ def test_sign_crq_on_qnetd(self, mock_qdevice_crq_qnetd, mock_call, mock_log):
+ mock_qdevice_crq_qnetd.return_value = "/etc/corosync/qnetd/nssdb/qdevice-net-node.crq"
+        mock_call.return_value = [("10.10.10.123", (0, None, None))]
+
+ self.qdevice_with_ip.cluster_name = "hacluster"
+ self.qdevice_with_ip.sign_crq_on_qnetd()
+
+ mock_log.assert_called_once_with("Step 7: Sign and export cluster certificate on 10.10.10.123",
+ 'corosync-qnetd-certutil -s -c /etc/corosync/qnetd/nssdb/qdevice-net-node.crq -n hacluster')
+ mock_qdevice_crq_qnetd.assert_called_once_with()
+ mock_call.assert_called_once_with(["10.10.10.123"],
+ "corosync-qnetd-certutil -s -c {} -n hacluster".format(mock_qdevice_crq_qnetd.return_value))
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cluster_crt_on_qnetd", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ def test_fetch_cluster_crt_from_qnetd(self, mock_parallax_slurp, mock_crt_on_qnetd, mock_log):
+ mock_crt_on_qnetd.return_value = "/etc/corosync/qnetd/nssdb/cluster-hacluster.crt"
+
+ self.qdevice_with_ip.cluster_name = "hacluster"
+ self.qdevice_with_ip.fetch_cluster_crt_from_qnetd()
+
+ mock_log.assert_called_once_with("Step 8: Fetch cluster-hacluster.crt from 10.10.10.123")
+ mock_crt_on_qnetd.assert_has_calls([mock.call(), mock.call()])
+ mock_parallax_slurp.assert_called_once_with(["10.10.10.123"], "/etc/corosync/qdevice/net", mock_crt_on_qnetd.return_value)
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cluster_crt_on_local", new_callable=mock.PropertyMock)
+ def test_import_cluster_crt(self, mock_crt_on_local, mock_stdout_stderr, mock_log):
+ mock_crt_on_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/cluster-hacluster.crt"
+
+ self.qdevice_with_ip.import_cluster_crt()
+
+ mock_log.assert_called_once_with("Step 9: Import certificate file cluster-hacluster.crt on local",
+ 'corosync-qdevice-net-certutil -M -c /etc/corosync/qdevice/net/10.10.10.123/cluster-hacluster.crt')
+ mock_crt_on_local.assert_has_calls([mock.call(), mock.call()])
+ mock_stdout_stderr.assert_called_once_with("corosync-qdevice-net-certutil -M -c {}".format(mock_crt_on_local.return_value))
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.utils.list_cluster_nodes")
+ @mock.patch("crmsh.utils.this_node")
+ @mock.patch("crmsh.parallax.parallax_copy")
+ def test_copy_p12_to_cluster_one_node(self, mock_copy, mock_this_node, mock_list_nodes, mock_log):
+ mock_this_node.return_value = "node1.com"
+ mock_list_nodes.return_value = ["node1.com"]
+
+ self.qdevice_with_ip.copy_p12_to_cluster()
+
+ mock_log.assert_not_called()
+ mock_this_node.assert_called_once_with()
+ mock_list_nodes.assert_called_once_with()
+ mock_copy.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.utils.list_cluster_nodes")
+ @mock.patch("crmsh.utils.this_node")
+ @mock.patch("crmsh.parallax.parallax_copy")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_local", new_callable=mock.PropertyMock)
+ def test_copy_p12_to_cluster(self, mock_p12_on_local,
+ mock_copy, mock_this_node, mock_list_nodes, mock_log):
+ mock_this_node.return_value = "node1.com"
+ mock_list_nodes.return_value = ["node1.com", "node2.com"]
+ mock_p12_on_local.return_value = "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12"
+
+ self.qdevice_with_ip.copy_p12_to_cluster()
+
+ mock_log.assert_called_once_with("Step 10: Copy qdevice-net-node.p12 to ['node2.com']")
+ mock_this_node.assert_called_once_with()
+ mock_list_nodes.assert_called_once_with()
+ mock_copy.assert_called_once_with(["node2.com"], mock_p12_on_local.return_value,
+ mock_p12_on_local.return_value, False)
+ mock_p12_on_local.assert_has_calls([mock.call(), mock.call()])
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.utils.list_cluster_nodes_except_me")
+ def test_import_p12_on_cluster_one_node(self, mock_list_nodes, mock_call, mock_log):
+ mock_list_nodes.return_value = []
+
+ self.qdevice_with_ip.import_p12_on_cluster()
+
+ mock_log.assert_not_called()
+ mock_list_nodes.assert_called_once_with()
+ mock_call.assert_not_called()
+
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.utils.list_cluster_nodes_except_me")
+ def test_import_p12_on_cluster(self, mock_list_nodes, mock_p12_on_local, mock_log, mock_call):
+ mock_list_nodes.return_value = ["node2", "node3"]
+ mock_p12_on_local.return_value = "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12"
+ mock_call.return_value = [("node2", (0, None, None)), ("node3", (0, None, None))]
+
+ self.qdevice_with_ip.import_p12_on_cluster()
+
+ mock_log.assert_called_once_with("Step 11: Import qdevice-net-node.p12 on ['node2', 'node3']",
+ 'corosync-qdevice-net-certutil -m -c /etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12')
+ mock_list_nodes.assert_called_once_with()
+ mock_call.assert_called_once_with(
+ ["node2", "node3"],
+ "corosync-qdevice-net-certutil -m -c {}".format(mock_p12_on_local.return_value))
+ mock_p12_on_local.assert_called_once_with()
+
+ @mock.patch("crmsh.qdevice.QDevice.import_p12_on_cluster")
+ @mock.patch("crmsh.qdevice.QDevice.copy_p12_to_cluster")
+ @mock.patch("crmsh.qdevice.QDevice.import_cluster_crt")
+ @mock.patch("crmsh.qdevice.QDevice.fetch_cluster_crt_from_qnetd")
+ @mock.patch("crmsh.qdevice.QDevice.sign_crq_on_qnetd")
+ @mock.patch("crmsh.qdevice.QDevice.copy_crq_to_qnetd")
+ @mock.patch("crmsh.qdevice.QDevice.create_ca_request")
+ @mock.patch("crmsh.qdevice.QDevice.init_db_on_cluster")
+ @mock.patch("crmsh.qdevice.QDevice.copy_qnetd_crt_to_cluster")
+ @mock.patch("crmsh.qdevice.QDevice.fetch_qnetd_crt_from_qnetd")
+ @mock.patch("crmsh.qdevice.QDevice.init_db_on_qnetd")
+ def test_certificate_process_on_init(self, mock_init_db_on_qnetd, mock_fetch_qnetd_crt_from_qnetd,
+ mock_copy_qnetd_crt_to_cluster, mock_init_db_on_cluster, mock_create_ca_request,
+ mock_copy_crq_to_qnetd, mock_sign_crq_on_qnetd, mock_fetch_cluster_crt_from_qnetd,
+ mock_import_cluster_crt, mock_copy_p12_to_cluster, mock_import_p12_on_cluster):
+
+ self.qdevice_with_ip.certificate_process_on_init()
+ mock_init_db_on_qnetd.assert_called_once_with()
+ mock_fetch_qnetd_crt_from_qnetd.assert_called_once_with()
+ mock_copy_qnetd_crt_to_cluster.assert_called_once_with()
+ mock_init_db_on_cluster.assert_called_once_with()
+ mock_create_ca_request.assert_called_once_with()
+ mock_copy_crq_to_qnetd.assert_called_once_with()
+ mock_sign_crq_on_qnetd.assert_called_once_with()
+ mock_fetch_cluster_crt_from_qnetd.assert_called_once_with()
+ mock_import_cluster_crt.assert_called_once_with()
+ mock_copy_p12_to_cluster.assert_called_once_with()
+ mock_import_p12_on_cluster.assert_called_once_with()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_cluster", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ def test_fetch_qnetd_crt_from_cluster_exist(self, mock_parallax_slurp, mock_qnetd_cacert_local,
+ mock_qnetd_cacert_cluster, mock_exists, mock_log):
+ mock_exists.return_value = True
+ mock_qnetd_cacert_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qnetd-cacert.crt"
+
+ self.qdevice_with_ip_cluster_node.fetch_qnetd_crt_from_cluster()
+
+ mock_log.assert_not_called()
+ mock_exists.assert_called_once_with(mock_qnetd_cacert_cluster.return_value)
+ mock_qnetd_cacert_cluster.assert_called_once_with()
+ mock_qnetd_cacert_local.assert_not_called()
+ mock_parallax_slurp.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_cluster", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ def test_fetch_qnetd_crt_from_cluster(self, mock_parallax_slurp, mock_qnetd_cacert_local,
+ mock_qnetd_cacert_cluster, mock_exists, mock_log):
+ mock_exists.return_value = False
+ mock_qnetd_cacert_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qnetd-cacert.crt"
+ mock_qnetd_cacert_local.return_value = "/etc/corosync/qdevice/net/10.10.10.123/qnetd-cacert.crt"
+
+ self.qdevice_with_ip_cluster_node.fetch_qnetd_crt_from_cluster()
+
+ mock_log.assert_called_once_with("Step 1: Fetch qnetd-cacert.crt from node1.com")
+ mock_exists.assert_called_once_with(mock_qnetd_cacert_cluster.return_value)
+ mock_qnetd_cacert_cluster.assert_called_once_with()
+ mock_qnetd_cacert_local.assert_called_once_with()
+ mock_parallax_slurp.assert_called_once_with(["node1.com"], "/etc/corosync/qdevice/net", mock_qnetd_cacert_local.return_value)
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ @mock.patch("crmsh.qdevice.QDevice.qnetd_cacert_on_cluster", new_callable=mock.PropertyMock)
+ def test_init_db_on_local(self, mock_qnetd_cacert_cluster, mock_stdout_stderr, mock_log):
+ mock_qnetd_cacert_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qnetd-cacert.crt"
+ mock_stdout_stderr.return_value = (0, None, None)
+
+ self.qdevice_with_ip_cluster_node.init_db_on_local()
+
+ mock_log.assert_called_once_with("Step 2: Initialize database on local",
+ 'corosync-qdevice-net-certutil -i -c /etc/corosync/qdevice/net/node1.com/qnetd-cacert.crt')
+ mock_qnetd_cacert_cluster.assert_called_once_with()
+ mock_stdout_stderr.assert_called_once_with("corosync-qdevice-net-certutil -i -c {}".format(mock_qnetd_cacert_cluster.return_value))
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_cluster", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ def test_fetch_p12_from_cluster_exist(self, mock_parallax_slurp, mock_p12_on_local,
+ mock_p12_on_cluster, mock_exists, mock_log):
+ mock_exists.return_value = True
+ mock_p12_on_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qdevice-net-node.p12"
+
+ self.qdevice_with_ip_cluster_node.fetch_p12_from_cluster()
+
+ mock_log.assert_not_called()
+ mock_exists.assert_called_once_with(mock_p12_on_cluster.return_value)
+ mock_p12_on_cluster.assert_called_once_with()
+ mock_p12_on_local.assert_not_called()
+ mock_parallax_slurp.assert_not_called()
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("os.path.exists")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_cluster", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_local", new_callable=mock.PropertyMock)
+ @mock.patch("crmsh.parallax.parallax_slurp")
+ def test_fetch_p12_from_cluster(self, mock_parallax_slurp, mock_p12_on_local,
+ mock_p12_on_cluster, mock_exists, mock_log):
+ mock_exists.return_value = False
+ mock_p12_on_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qdevice-net-node.p12"
+ mock_p12_on_local.return_value = "/etc/corosync/qdevice/net/nssdb/qdevice-net-node.p12"
+
+ self.qdevice_with_ip_cluster_node.fetch_p12_from_cluster()
+
+ mock_log.assert_called_once_with("Step 3: Fetch qdevice-net-node.p12 from node1.com")
+ mock_exists.assert_called_once_with(mock_p12_on_cluster.return_value)
+ mock_p12_on_cluster.assert_called_once_with()
+ mock_p12_on_local.assert_called_once_with()
+ mock_parallax_slurp.assert_called_once_with(["node1.com"], '/etc/corosync/qdevice/net', mock_p12_on_local.return_value)
+
+ @mock.patch("crmsh.qdevice.QDevice.log_only_to_file")
+ @mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+ @mock.patch("crmsh.qdevice.QDevice.qdevice_p12_on_cluster", new_callable=mock.PropertyMock)
+ def test_import_p12_on_local(self, mock_p12_on_cluster, mock_stdout_stderr, mock_log):
+ mock_p12_on_cluster.return_value = "/etc/corosync/qdevice/net/node1.com/qdevice-net-node.p12"
+
+ self.qdevice_with_ip_cluster_node.import_p12_on_local()
+
+ mock_log.assert_called_once_with("Step 4: Import cluster certificate and key",
+ 'corosync-qdevice-net-certutil -m -c /etc/corosync/qdevice/net/node1.com/qdevice-net-node.p12')
+ mock_p12_on_cluster.assert_called_once_with()
+ mock_stdout_stderr.assert_called_once_with("corosync-qdevice-net-certutil -m -c {}".format(mock_p12_on_cluster.return_value))
+
+ @mock.patch("crmsh.qdevice.QDevice.import_p12_on_local")
+ @mock.patch("crmsh.qdevice.QDevice.fetch_p12_from_cluster")
+ @mock.patch("crmsh.qdevice.QDevice.init_db_on_local")
+ @mock.patch("crmsh.qdevice.QDevice.fetch_qnetd_crt_from_cluster")
+ def test_certificate_process_on_join(self, mock_fetch_qnetd_crt_from_cluster, mock_init_db_on_local,
+ mock_fetch_p12_from_cluster, mock_import_p12_on_local):
+ self.qdevice_with_ip.certificate_process_on_join()
+ mock_fetch_qnetd_crt_from_cluster.assert_called_once_with()
+ mock_init_db_on_local.assert_called_once_with()
+ mock_fetch_p12_from_cluster.assert_called_once_with()
+ mock_import_p12_on_local.assert_called_once_with()
+
+ @mock.patch("crmsh.utils.str2file")
+ @mock.patch("crmsh.corosync.make_section")
+ @mock.patch("crmsh.corosync.Parser")
+ @mock.patch("crmsh.corosync.conf")
+ @mock.patch("crmsh.utils.read_from_file")
+ def test_write_qdevice_config(self, mock_read_file, mock_conf, mock_parser, mock_mksection, mock_str2file):
+ mock_mksection.side_effect = [
+ ["device {", "}"],
+ ["net {", "}"]
+ ]
+ mock_read_file.return_value = "data"
+ mock_conf.side_effect = ["corosync.conf", "corosync.conf"]
+ mock_instance = mock.Mock()
+ mock_parser.return_value = mock_instance
+ mock_instance.to_string.return_value = "string data"
+
+ self.qdevice_with_ip.write_qdevice_config()
+
+ mock_conf.assert_has_calls([mock.call(), mock.call()])
+ mock_parser.assert_called_once_with("data")
+ mock_instance.remove.assert_called_once_with("quorum.device")
+ mock_instance.add.assert_has_calls([
+ mock.call('quorum', ["device {", "}"]),
+ mock.call('quorum.device', ["net {", "}"])
+ ])
+ mock_instance.set.assert_has_calls([
+ mock.call('quorum.device.votes', '1'),
+ mock.call('quorum.device.model', 'net'),
+ mock.call('quorum.device.net.tls', 'on'),
+ mock.call('quorum.device.net.host', '10.10.10.123'),
+ mock.call('quorum.device.net.port', 5403),
+ mock.call('quorum.device.net.algorithm', 'ffsplit'),
+ mock.call('quorum.device.net.tie_breaker', 'lowest')
+ ])
+ mock_instance.to_string.assert_called_once_with()
+ mock_mksection.assert_has_calls([
+ mock.call('quorum.device', []),
+ mock.call('quorum.device.net', [])
+ ])
+ mock_str2file.assert_called_once_with("string data", "corosync.conf")
+
+ @mock.patch("crmsh.utils.str2file")
+ @mock.patch("crmsh.corosync.Parser")
+ @mock.patch("crmsh.corosync.conf")
+ @mock.patch("crmsh.utils.read_from_file")
+ def test_remove_qdevice_config(self, mock_read_file, mock_conf, mock_parser, mock_str2file):
+ mock_conf.side_effect = ["corosync.conf", "corosync.conf"]
+ mock_read_file.return_value = "data"
+ mock_instance = mock.Mock()
+ mock_parser.return_value = mock_instance
+ mock_instance.to_string.return_value = "string data"
+
+ self.qdevice_with_ip.remove_qdevice_config()
+
+ mock_conf.assert_has_calls([mock.call(), mock.call()])
+ mock_parser.assert_called_once_with("data")
+ mock_instance.remove.assert_called_once_with("quorum.device")
+ mock_instance.to_string.assert_called_once_with()
+ mock_str2file.assert_called_once_with("string data", "corosync.conf")
+
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('os.path.exists')
+ def test_remove_qdevice_db_not_exist(self, mock_exists, mock_list_nodes, mock_call):
+ mock_exists.return_value = False
+
+ self.qdevice_with_ip.remove_qdevice_db()
+
+ mock_exists.assert_called_once_with('/etc/corosync/qdevice/net/nssdb')
+ mock_list_nodes.assert_not_called()
+ mock_call.assert_not_called()
+
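+ # With an existing NSS database the cleanup command is fanned out to every
+ # cluster node via parallax.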
+ @mock.patch("crmsh.parallax.parallax_call")
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ @mock.patch('os.path.exists')
+ def test_remove_qdevice_db(self, mock_exists, mock_list_nodes, mock_call):
+ mock_exists.return_value = True
+ mock_list_nodes.return_value = ["node1.com", "node2.com"]
+ mock_call.return_value = [("node1.com", (0, None, None)), ("node2.com", (0, None, None))]
+
+ self.qdevice_with_ip.remove_qdevice_db()
+
+ mock_exists.assert_called_once_with('/etc/corosync/qdevice/net/nssdb')
+ mock_list_nodes.assert_called_once_with()
+ mock_call.assert_called_once_with(mock_list_nodes.return_value,
+ 'rm -rf /etc/corosync/qdevice/net/*')
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.corosync.get_value')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_check_qdevice_vote(self, mock_run, mock_get_value, mock_warning):
+ data = """
+Membership information
+----------------------
+ Nodeid Votes Qdevice Name
+ 1 1 A,V,NMW 192.168.122.221 (local)
+ 0 0 Qdevice
+ """
+ mock_run.return_value = data
+ mock_get_value.return_value = "qnetd-node"
+ qdevice.QDevice.check_qdevice_vote()
+ mock_run.assert_called_once_with("corosync-quorumtool -s", success_exit_status={0, 2})
+ mock_get_value.assert_called_once_with("quorum.device.net.host")
+ mock_warning.assert_called_once_with("Qdevice's vote is 0, which simply means Qdevice can't talk to Qnetd(qnetd-node) for various reasons.")
+
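+ # config_and_start_qdevice is decorated, so the test calls the underlying
+ # function through __wrapped__ to exercise its body directly.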
+ @mock.patch('crmsh.qdevice.evaluate_qdevice_quorum_effect')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ @mock.patch('crmsh.qdevice.QDevice.remove_qdevice_db')
+ def test_config_and_start_qdevice(self, mock_rm_db, mock_status_long, mock_evaluate):
+ mock_status_long.return_value.__enter__ = mock.Mock()
+ mock_status_long.return_value.__exit__ = mock.Mock()
+ self.qdevice_with_ip.certificate_process_on_init = mock.Mock()
+ self.qdevice_with_ip.adjust_sbd_watchdog_timeout_with_qdevice = mock.Mock()
+ self.qdevice_with_ip.config_qdevice = mock.Mock()
+ self.qdevice_with_ip.start_qdevice_service = mock.Mock()
+
+ self.qdevice_with_ip.config_and_start_qdevice.__wrapped__(self.qdevice_with_ip)
+
+ mock_rm_db.assert_called_once_with()
+ mock_status_long.assert_called_once_with("Qdevice certification process")
+ self.qdevice_with_ip.certificate_process_on_init.assert_called_once_with()
+ self.qdevice_with_ip.adjust_sbd_watchdog_timeout_with_qdevice.assert_called_once_with()
+ self.qdevice_with_ip.config_qdevice.assert_called_once_with()
+ self.qdevice_with_ip.start_qdevice_service.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_stonith_timeout')
+ @mock.patch('crmsh.sbd.SBDManager.update_configuration')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ @mock.patch('crmsh.sbd.SBDManager.is_using_diskless_sbd')
+ @mock.patch('crmsh.utils.check_all_nodes_reachable')
+ def test_adjust_sbd_watchdog_timeout_with_qdevice(self, mock_check_reachable, mock_using_diskless_sbd, mock_get_sbd_value, mock_update_config, mock_get_timeout, mock_set_property):
+ mock_using_diskless_sbd.return_value = True
+ mock_get_sbd_value.return_value = ""
+ mock_get_timeout.return_value = 100
+
+ self.qdevice_with_stage_cluster_name.adjust_sbd_watchdog_timeout_with_qdevice()
+
+ mock_check_reachable.assert_called_once_with()
+ mock_using_diskless_sbd.assert_called_once_with()
+ mock_get_sbd_value.assert_called_once_with("SBD_WATCHDOG_TIMEOUT")
+ mock_update_config.assert_called_once_with({"SBD_WATCHDOG_TIMEOUT": str(sbd.SBDTimeout.SBD_WATCHDOG_TIMEOUT_DEFAULT_WITH_QDEVICE)})
+ mock_set_property.assert_called_once_with("stonith-timeout", 100)
+
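+ # The next three tests cover each QdevicePolicy value: RELOAD reloads
+ # corosync-qdevice in place, RESTART restarts the whole cluster service,
+ # and RESTART_LATER only warns that a manual restart is needed.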
+ @mock.patch('crmsh.qdevice.QDevice.start_qnetd')
+ @mock.patch('crmsh.qdevice.QDevice.enable_qnetd')
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('logging.Logger.info')
+ def test_start_qdevice_service_reload(self, mock_status, mock_cluster_run, mock_enable_qnetd, mock_start_qnetd):
+ self.qdevice_with_ip.qdevice_reload_policy = qdevice.QdevicePolicy.QDEVICE_RELOAD
+
+ self.qdevice_with_ip.start_qdevice_service()
+
+ mock_status.assert_has_calls([
+ mock.call("Enable corosync-qdevice.service in cluster"),
+ mock.call("Starting corosync-qdevice.service in cluster"),
+ mock.call("Enable corosync-qnetd.service on 10.10.10.123"),
+ mock.call("Starting corosync-qnetd.service on 10.10.10.123")
+ ])
+ mock_cluster_run.assert_has_calls([
+ mock.call("systemctl enable corosync-qdevice"),
+ mock.call("systemctl restart corosync-qdevice")
+ ])
+ mock_enable_qnetd.assert_called_once_with()
+ mock_start_qnetd.assert_called_once_with()
+
+ @mock.patch('crmsh.qdevice.QDevice.start_qnetd')
+ @mock.patch('crmsh.qdevice.QDevice.enable_qnetd')
+ @mock.patch('crmsh.bootstrap.wait_for_cluster')
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('logging.Logger.info')
+ def test_start_qdevice_service_restart(self, mock_status, mock_cluster_run, mock_wait, mock_enable_qnetd, mock_start_qnetd):
+ self.qdevice_with_ip.qdevice_reload_policy = qdevice.QdevicePolicy.QDEVICE_RESTART
+
+ self.qdevice_with_ip.start_qdevice_service()
+
+ mock_status.assert_has_calls([
+ mock.call("Enable corosync-qdevice.service in cluster"),
+ mock.call("Restarting cluster service"),
+ mock.call("Enable corosync-qnetd.service on 10.10.10.123"),
+ mock.call("Starting corosync-qnetd.service on 10.10.10.123")
+ ])
+ mock_wait.assert_called_once_with()
+ mock_cluster_run.assert_has_calls([
+ mock.call("systemctl enable corosync-qdevice"),
+ mock.call("crm cluster restart")
+ ])
+ mock_enable_qnetd.assert_called_once_with()
+ mock_start_qnetd.assert_called_once_with()
+
+ @mock.patch('crmsh.qdevice.QDevice.start_qnetd')
+ @mock.patch('crmsh.qdevice.QDevice.enable_qnetd')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('logging.Logger.info')
+ def test_start_qdevice_service_warn(self, mock_status, mock_cluster_run, mock_warn, mock_enable_qnetd, mock_start_qnetd):
+ self.qdevice_with_ip.qdevice_reload_policy = qdevice.QdevicePolicy.QDEVICE_RESTART_LATER
+
+ self.qdevice_with_ip.start_qdevice_service()
+
+ mock_status.assert_has_calls([
+ mock.call("Enable corosync-qdevice.service in cluster"),
+ mock.call("Enable corosync-qnetd.service on 10.10.10.123"),
+ mock.call("Starting corosync-qnetd.service on 10.10.10.123")
+ ])
+ mock_cluster_run.assert_has_calls([
+ mock.call("systemctl enable corosync-qdevice"),
+ ])
+ mock_warn.assert_called_once_with("To use qdevice service, need to restart cluster service manually on each node")
+ mock_enable_qnetd.assert_called_once_with()
+ mock_start_qnetd.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('crmsh.bootstrap.update_expected_votes')
+ @mock.patch('crmsh.log.LoggerUtils.status_long')
+ @mock.patch('crmsh.corosync.add_nodelist_from_cmaptool')
+ @mock.patch('crmsh.corosync.is_unicast')
+ @mock.patch('crmsh.qdevice.QDevice.write_qdevice_config')
+ def test_config_qdevice(self, mock_write, mock_is_unicast, mock_add_nodelist, mock_status_long,
+ mock_update_votes, mock_run):
+ mock_is_unicast.return_value = False
+ mock_status_long.return_value.__enter__ = mock.Mock()
+ mock_status_long.return_value.__exit__ = mock.Mock()
+ self.qdevice_with_ip.qdevice_reload_policy = qdevice.QdevicePolicy.QDEVICE_RELOAD
+
+ self.qdevice_with_ip.config_qdevice()
+
+ mock_write.assert_called_once_with()
+ mock_is_unicast.assert_called_once_with()
+ mock_add_nodelist.assert_called_once_with()
+ mock_status_long.assert_called_once_with("Update configuration")
+ mock_update_votes.assert_called_once_with()
+ mock_run.assert_called_once_with("crm corosync reload")
+
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_remove_certification_files_on_qnetd_return(self, mock_configured):
+ mock_configured.return_value = False
+ qdevice.QDevice.remove_certification_files_on_qnetd()
+ mock_configured.assert_called_once_with()
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.corosync.get_value')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_remove_certification_files_on_qnetd(self, mock_configured, mock_get_value, mock_run):
+ mock_configured.return_value = True
+ mock_get_value.side_effect = ["qnetd-node", "cluster1"]
+ qdevice.QDevice.remove_certification_files_on_qnetd()
+ mock_configured.assert_called_once_with()
+ mock_get_value.assert_has_calls([
+ mock.call("quorum.device.net.host"),
+ mock.call("totem.cluster_name")])
+ crt_file = "/etc/corosync/qnetd/nssdb/cluster-cluster1.crt"
+ crt_cmd = "test -f {crt_file} && rm -f {crt_file}".format(crt_file=crt_file)
+ crq_file = "/etc/corosync/qnetd/nssdb/qdevice-net-node.crq.cluster1"
+ crq_cmd = "test -f {crq_file} && rm -f {crq_file}".format(crq_file=crq_file)
+ mock_run.assert_has_calls([
+ mock.call(crt_cmd, "qnetd-node"),
+ mock.call(crq_cmd, "qnetd-node")])
diff --git a/test/unittests/test_ratrace.py b/test/unittests/test_ratrace.py
new file mode 100644
index 0000000..6734b89
--- /dev/null
+++ b/test/unittests/test_ratrace.py
@@ -0,0 +1,133 @@
+import unittest
+from lxml import etree
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+from crmsh import cibconfig
+from crmsh.ui_context import Context
+from crmsh.ui_resource import RscMgmt
+from crmsh.ui_root import Root
+
+
+class TestRATrace(unittest.TestCase):
+ """Unit tests for enabling/disabling RA tracing."""
+
+ context = Context(Root())
+ factory = cibconfig.cib_factory
+
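+ # setUp/tearDown push and pop cib_factory state so objects created by one
+ # test do not leak into the shared factory used by the next.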
+ def setUp(self):
+ self.factory._push_state()
+
+ def tearDown(self):
+ self.factory._pop_state()
+
+ @mock.patch('logging.Logger.error')
+ def test_ratrace_resource(self, mock_error):
+ """Check setting RA tracing for a resource."""
+ xml = '''<primitive class="ocf" id="r1" provider="pacemaker" type="Dummy"/>'''
+ obj = self.factory.create_from_node(etree.fromstring(xml))
+
+ # Trace the resource.
+ RscMgmt()._trace_resource(self.context, obj.obj_id, obj, '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-start-0', 'r1-stop-0'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-start-0"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-stop-0"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+
+ # Untrace the resource.
+ RscMgmt()._untrace_resource(self.context, obj.obj_id, obj)
+ self.assertEqual(obj.node.xpath('operations/op/@id'), [])
+ self.assertEqual(obj.node.xpath('.//*[@name="trace_ra"]'), [])
+
+ @mock.patch('logging.Logger.error')
+ def test_ratrace_op(self, mock_error):
+ """Check setting RA tracing for a specific operation."""
+ xml = '''<primitive class="ocf" id="r1" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="r1-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>'''
+ obj = self.factory.create_from_node(etree.fromstring(xml))
+
+ # Trace the operation.
+ RscMgmt()._trace_op(self.context, obj.obj_id, obj, 'monitor', '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-monitor-10"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+
+ # Untrace the operation.
+ RscMgmt()._untrace_op(self.context, obj.obj_id, obj, 'monitor')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10'])
+ self.assertEqual(obj.node.xpath('.//*[@name="trace_ra"]'), [])
+
+ # Try untracing a non-existent operation.
+ with self.assertRaises(ValueError) as err:
+ RscMgmt()._untrace_op(self.context, obj.obj_id, obj, 'invalid-op')
+ self.assertEqual(str(err.exception), "Operation invalid-op not found in r1")
+
+ @mock.patch('logging.Logger.error')
+ def test_ratrace_new(self, mock_error):
+ """Check setting RA tracing for an operation that is not in CIB."""
+ xml = '''<primitive class="ocf" id="r1" provider="pacemaker" type="Dummy">
+ </primitive>'''
+ obj = self.factory.create_from_node(etree.fromstring(xml))
+
+ # Trace a regular operation that is not yet defined in CIB. The request
+ # should succeed and introduce an op node for the operation.
+ RscMgmt()._trace_op(self.context, obj.obj_id, obj, 'start', '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-start-0'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-start-0"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+
+ # Try tracing the monitor operation in the same way. The request should
+ # get rejected because no explicit interval is specified.
+ with self.assertRaises(ValueError) as err:
+ RscMgmt()._trace_op(self.context, obj.obj_id, obj, 'monitor', '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(str(err.exception), "No monitor operation configured for r1")
+
+ @mock.patch('logging.Logger.error')
+ def test_ratrace_op_stateful(self, mock_error):
+ """Check setting RA tracing for an operation on a stateful resource."""
+ xml = '''<primitive class="ocf" id="r1" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="r1-monitor-10" interval="10" name="monitor" role="Master"/>
+ <op id="r1-monitor-11" interval="11" name="monitor" role="Slave"/>
+ </operations>
+ </primitive>'''
+ obj = self.factory.create_from_node(etree.fromstring(xml))
+
+ # Trace the operation.
+ RscMgmt()._trace_op(self.context, obj.obj_id, obj, 'monitor', '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10', 'r1-monitor-11'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-monitor-10"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-monitor-11"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+
+ # Untrace the operation.
+ RscMgmt()._untrace_op(self.context, obj.obj_id, obj, 'monitor')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10', 'r1-monitor-11'])
+ self.assertEqual(obj.node.xpath('.//*[@name="trace_ra"]'), [])
+
+ @mock.patch('logging.Logger.error')
+ def test_ratrace_op_interval(self, mock_error):
+ """Check setting RA tracing for an operation+interval."""
+ xml = '''<primitive class="ocf" id="r1" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="r1-monitor-10" interval="10" name="monitor"/>
+ </operations>
+ </primitive>'''
+ obj = self.factory.create_from_node(etree.fromstring(xml))
+
+ # Trace the operation.
+ RscMgmt()._trace_op_interval(self.context, obj.obj_id, obj, 'monitor', '10', '/var/lib/heartbeat/trace_ra')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10'])
+ self.assertEqual(obj.node.xpath('operations/op[@id="r1-monitor-10"]/instance_attributes/nvpair[@name="trace_ra"]/@value'), ['1'])
+
+ # Untrace the operation.
+ RscMgmt()._untrace_op_interval(self.context, obj.obj_id, obj, 'monitor', '10')
+ self.assertEqual(obj.node.xpath('operations/op/@id'), ['r1-monitor-10'])
+ self.assertEqual(obj.node.xpath('.//*[@name="trace_ra"]'), [])
+
+ # Try untracing a non-existent operation.
+ with self.assertRaises(ValueError) as err:
+ RscMgmt()._untrace_op_interval(self.context, obj.obj_id, obj, 'invalid-op', '10')
+ self.assertEqual(str(err.exception), "Operation invalid-op with interval 10 not found in r1")
diff --git a/test/unittests/test_report_collect.py b/test/unittests/test_report_collect.py
new file mode 100644
index 0000000..faec951
--- /dev/null
+++ b/test/unittests/test_report_collect.py
@@ -0,0 +1,603 @@
+from subprocess import TimeoutExpired
+from crmsh.report import collect, constants
+import crmsh.log
+
+import unittest
+from unittest import mock
+
+
+class TestCollect(unittest.TestCase):
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('os.path.isfile')
+ def test_get_pcmk_log_no_config(self, mock_isfile, mock_warning):
+ mock_isfile.side_effect = [False, False, False]
+ res = collect.get_pcmk_log()
+ self.assertEqual(res, "")
+ mock_isfile.assert_has_calls([
+ mock.call(constants.PCMKCONF),
+ mock.call("/var/log/pacemaker/pacemaker.log"),
+ mock.call("/var/log/pacemaker.log")
+ ])
+ mock_warning.assert_called_once_with("No valid pacemaker log file found")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.read_from_file')
+ @mock.patch('os.path.isfile')
+ def test_get_pcmk_log(self, mock_isfile, mock_read, mock_warning):
+ mock_isfile.return_value = True
+ mock_read.return_value = """
+# has been enabled, those as well). This log is of more use to developers and
+# advanced system administrators, and when reporting problems.
+PCMK_logfile=/var/log/pacemaker/pacemaker.log
+
+# Set the permissions on the above log file to owner/group read/write
+ """
+ res = collect.get_pcmk_log()
+ self.assertEqual(res, "/var/log/pacemaker/pacemaker.log")
+ mock_isfile.assert_has_calls([
+ mock.call(constants.PCMKCONF),
+ mock.call("/var/log/pacemaker/pacemaker.log")
+ ])
+ mock_read.assert_called_once_with(constants.PCMKCONF)
+
+ @mock.patch('crmsh.report.utils.dump_logset')
+ @mock.patch('os.path.isfile')
+ @mock.patch('crmsh.report.collect.get_pcmk_log')
+ @mock.patch('crmsh.report.collect.get_corosync_log')
+ def test_collect_ha_logs(self, mock_corosync_log, mock_get_log, mock_isfile, mock_dump):
+ mock_corosync_log.return_value = "/var/log/cluster/corosync.log"
+ mock_get_log.return_value = "/var/pacemaker.log"
+ mock_isfile.side_effect = [True, True]
+ mock_ctx_inst = mock.Mock(extra_log_list=[])
+
+ collect.collect_ha_logs(mock_ctx_inst)
+
+ mock_get_log.assert_called_once_with()
+ mock_isfile.assert_has_calls([
+ mock.call(mock_get_log.return_value),
+ mock.call(mock_corosync_log.return_value)
+ ])
+ mock_dump.assert_has_calls([
+ mock.call(mock_ctx_inst, mock_get_log.return_value),
+ mock.call(mock_ctx_inst, mock_corosync_log.return_value)
+ ])
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.corosync.conf')
+ def test_get_corosync_log_not_exist(self, mock_conf, mock_exists, mock_warning):
+ mock_conf.return_value = "/etc/corosync/corosync.conf"
+ mock_exists.return_value = False
+ self.assertEqual(collect.get_corosync_log(), "")
+
+ @mock.patch('crmsh.corosync.get_value')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.corosync.conf')
+ def test_get_corosync_log(self, mock_conf, mock_exists, mock_get_value):
+ mock_conf.return_value = "/etc/corosync/corosync.conf"
+ mock_get_value.return_value = "/var/log/cluster/corosync.log"
+ mock_exists.return_value = True
+ self.assertEqual(collect.get_corosync_log(), mock_get_value.return_value)
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ @mock.patch('crmsh.report.utils.ts_to_str')
+ def test_collect_journal_logs(self, mock_ts_to_str, mock_get_cmd_output,
+ mock_str2file, mock_logger, mock_real_path):
+ mock_real_path.side_effect = [
+ constants.JOURNAL_F,
+ constants.JOURNAL_PCMK_F,
+ constants.JOURNAL_COROSYNC_F,
+ constants.JOURNAL_SBD_F
+ ]
+ mock_ctx_inst = mock.Mock(from_time=1234, to_time=5678, work_dir="/opt/work")
+ mock_ts_to_str.side_effect = ["10.10", "10.12"]
+ mock_get_cmd_output.side_effect = ["data_default", "data_pacemaker", "data_corosync", "data_sbd"]
+ collect.collect_journal_logs(mock_ctx_inst)
+ mock_ts_to_str.assert_has_calls([
+ mock.call(mock_ctx_inst.from_time),
+ mock.call(mock_ctx_inst.to_time)
+ ])
+ cmd_list = [
+ 'journalctl -o short-iso-precise --since "10.10" --until "10.12" --no-pager | tail -n +2',
+ 'journalctl -u pacemaker -o short-iso-precise --since "10.10" --until "10.12" --no-pager | tail -n +2',
+ 'journalctl -u corosync -o short-iso-precise --since "10.10" --until "10.12" --no-pager | tail -n +2',
+ 'journalctl -u sbd -o short-iso-precise --since "10.10" --until "10.12" --no-pager | tail -n +2'
+ ]
+ mock_get_cmd_output.assert_has_calls([
+ mock.call(cmd_list[0]),
+ mock.call(cmd_list[1]),
+ mock.call(cmd_list[2]),
+ mock.call(cmd_list[3]),
+ ])
+ mock_logger.debug2.assert_has_calls([
+ mock.call("Collect journal logs since: 10.10 until: 10.12"),
+ mock.call(f"Running command: {cmd_list[0]}"),
+ mock.call(f"Running command: {cmd_list[1]}"),
+ mock.call(f"Running command: {cmd_list[2]}"),
+ mock.call(f"Running command: {cmd_list[3]}"),
+ ])
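+ # "jounal" mirrors the exact spelling of the debug messages emitted by
+ # collect_journal_logs, so the assertions keep it verbatim.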
+ mock_logger.debug.assert_has_calls([
+ mock.call(f"Dump jounal log for default into {constants.JOURNAL_F}"),
+ mock.call(f"Dump jounal log for pacemaker into {constants.JOURNAL_PCMK_F}"),
+ mock.call(f"Dump jounal log for corosync into {constants.JOURNAL_COROSYNC_F}"),
+ mock.call(f"Dump jounal log for sbd into {constants.JOURNAL_SBD_F}")
+ ])
+
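+ # dump_D_process() collects PIDs of D-state (uninterruptible) processes via
+ # ps, then reads each one's comm and kernel stack from /proc.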
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_dump_D_process_empty(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, None, None)
+ res = collect.dump_D_process()
+ self.assertEqual(res, "Dump D-state process stack: 0\n")
+
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_dump_D_process(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.side_effect = [
+ (0, "1000", None),
+ (0, "data1", None),
+ (0, "data2", None)
+ ]
+ res = collect.dump_D_process()
+ self.assertEqual(res, "Dump D-state process stack: 1\npid: 1000 comm: data1\ndata2\n\n")
+ mock_run_inst.get_stdout_stderr.assert_has_calls([
+ mock.call("ps aux|awk '$8 ~ /^D/{print $2}'"),
+ mock.call('cat /proc/1000/comm'),
+ mock.call('cat /proc/1000/stack')
+ ])
+
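+ # collect_sbd_info() returns early without an SBD config, copies the config
+ # when present, and only runs `sbd dump`/`sbd list` if sbd is installed.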
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('os.path.exists')
+ def test_collect_sbd_info_no_config(self, mock_exists, mock_debug):
+ mock_exists.return_value = False
+ mock_ctx_inst = mock.Mock()
+ collect.collect_sbd_info(mock_ctx_inst)
+ mock_exists.assert_called_once_with(constants.SBDCONF)
+ mock_debug.assert_called_once_with(f"SBD config file {constants.SBDCONF} does not exist")
+
+ @mock.patch('shutil.which')
+ @mock.patch('shutil.copy2')
+ @mock.patch('os.path.exists')
+ def test_collect_sbd_info_no_cmd(self, mock_exists, mock_copy, mock_which):
+ mock_exists.return_value = True
+ mock_which.return_value = False
+ mock_ctx_inst = mock.Mock(work_dir="/opt")
+ collect.collect_sbd_info(mock_ctx_inst)
+ mock_exists.assert_called_once_with(constants.SBDCONF)
+ mock_copy.assert_called_once_with(constants.SBDCONF, mock_ctx_inst.work_dir)
+ mock_which.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('builtins.open', create=True)
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ @mock.patch('shutil.which')
+ @mock.patch('shutil.copy2')
+ @mock.patch('os.path.exists')
+ def test_collect_sbd_info(self, mock_exists, mock_copy, mock_which, mock_run, mock_debug, mock_open_file, mock_real_path):
+ mock_real_path.return_value = constants.SBD_F
+ mock_exists.return_value = True
+ mock_which.return_value = True
+ mock_open_write = mock.mock_open()
+ file_handle = mock_open_write.return_value.__enter__.return_value
+ mock_open_file.return_value = mock_open_write.return_value
+ mock_run.return_value = "data"
+ mock_ctx_inst = mock.Mock(work_dir="/opt")
+
+ collect.collect_sbd_info(mock_ctx_inst)
+
+ mock_exists.assert_called_once_with(constants.SBDCONF)
+ mock_copy.assert_called_once_with(constants.SBDCONF, mock_ctx_inst.work_dir)
+ mock_which.assert_called_once_with("sbd")
+ mock_open_file.assert_called_once_with(f"{mock_ctx_inst.work_dir}/{constants.SBD_F}", "w")
+ file_handle.write.assert_has_calls([
+ mock.call("\n\n#=====[ Command ] ==========================#\n"),
+ mock.call("# . /etc/sysconfig/sbd;export SBD_DEVICE;sbd dump;sbd list\n"),
+ mock.call("data")
+ ])
+ mock_debug.assert_called_once_with(f"Dump SBD config file into {constants.SBD_F}")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_pe_to_dot(self, mock_run, mock_warning):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (1, None, None)
+ collect.pe_to_dot("/opt/pe-input-0.bz2")
+ mock_run_inst.get_stdout_stderr.assert_called_once_with("crm_simulate -D /opt/pe-input-0.dot -x /opt/pe-input-0.bz2")
+ mock_warning.assert_called_once_with('pe_to_dot: %s -> %s failed', '/opt/pe-input-0.bz2', '/opt/pe-input-0.dot')
+
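+ # collect_pe_inputs() symlinks PE files found in the timespan into the work
+ # dir; "the giving time" is quoted verbatim from its debug2 message.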
+ @mock.patch('crmsh.report.utils.find_files_in_timespan')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_collect_pe_inputs_no_found(self, mock_logger, mock_find_files):
+ mock_ctx_inst = mock.Mock(pe_dir="/opt/pe_dir")
+ mock_find_files.return_value = []
+ collect.collect_pe_inputs(mock_ctx_inst)
+ mock_find_files.assert_called_once_with(mock_ctx_inst, [mock_ctx_inst.pe_dir])
+ mock_logger.debug2.assert_has_calls([
+ mock.call(f"Looking for PE files in {mock_ctx_inst.pe_dir}"),
+ mock.call("No PE file found for the giving time")
+ ])
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.pe_to_dot')
+ @mock.patch('os.symlink')
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.report.utils.find_files_in_timespan')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_collect_pe_inputs(self, mock_logger, mock_find_files, mock_mkdir, mock_symlink, mock_to_dot, mock_real_path):
+ mock_real_path.return_value = "pe_dir"
+ mock_ctx_inst = mock.Mock(pe_dir="/opt/pe_dir", work_dir="/opt/work_dir", speed_up=False)
+ mock_find_files.return_value = ["/opt/pe_dir/pe_input1", "/opt/pe_dir/pe_input2"]
+
+ collect.collect_pe_inputs(mock_ctx_inst)
+
+ mock_find_files.assert_called_once_with(mock_ctx_inst, [mock_ctx_inst.pe_dir])
+ mock_logger.debug2.assert_has_calls([
+ mock.call(f"Looking for PE files in {mock_ctx_inst.pe_dir}"),
+ mock.call(f"Found 2 PE files in {mock_ctx_inst.pe_dir}"),
+ ])
+ mock_logger.debug.assert_called_once_with(f"Dump PE files into pe_dir")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ def test_collect_sys_stats(self, mock_run, mock_str2file, mock_logger, mock_real_path):
+ mock_real_path.return_value = constants.SYSSTATS_F
+ mock_run.side_effect = [
+ "data_hostname", "data_uptime", "data_ps_axf", "data_ps_auxw",
+ "data_top", "data_ip_addr", "data_ip_link", "data_ip_show", "data_iscsi",
+ "data_lspci", "data_mount", "data_cpuinfo", TimeoutExpired("df", 5)
+ ]
+ mock_ctx_inst = mock.Mock(work_dir="/opt")
+ collect.collect_sys_stats(mock_ctx_inst)
+ mock_logger.warning.assert_called_once_with(f"Timeout while running command: df")
+ mock_run.assert_has_calls([
+ mock.call("hostname", timeout=5),
+ mock.call("uptime", timeout=5),
+ mock.call("ps axf", timeout=5),
+ mock.call("ps auxw", timeout=5),
+ mock.call("top -b -n 1", timeout=5),
+ mock.call("ip addr", timeout=5),
+ mock.call("ip -s link", timeout=5),
+ mock.call("ip n show", timeout=5),
+ mock.call("lsscsi", timeout=5),
+ mock.call("lspci", timeout=5),
+ mock.call("mount", timeout=5),
+ mock.call("cat /proc/cpuinfo", timeout=5),
+ mock.call("df", timeout=5)
+ ])
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.report.utils.get_distro_info')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('os.uname')
+ @mock.patch('crmsh.report.utils.Package')
+ def test_collect_sys_info(self, mock_package, mock_uname, mock_str2file, mock_get_distro, mock_debug, mock_real_path):
+ mock_real_path.return_value = constants.SYSINFO_F
+ mock_package_inst = mock.Mock()
+ mock_package.return_value = mock_package_inst
+ mock_package_inst.version = mock.Mock(return_value="version_data\n")
+ mock_package_inst.verify = mock.Mock(return_value="verify_data\n")
+ mock_ctx_inst = mock.Mock(speed_up=False, work_dir="/opt/work")
+ mock_uname.return_value = ("Linux", None, "4.5", None, "x86_64")
+ mock_get_distro.return_value = "suse"
+
+ collect.collect_sys_info(mock_ctx_inst)
+
+ mock_package.assert_called_once_with(constants.PACKAGES)
+ mock_str2file.assert_called_once_with('##### System info #####\nPlatform: Linux\nKernel release: 4.5\nArchitecture: x86_64\nDistribution: suse\n\n##### Installed cluster related packages #####\nversion_data\n\n\n##### Verification output of packages #####\nverify_data\n', '/opt/work/sysinfo.txt')
+ mock_debug.assert_called_once_with(f"Dump packages and platform info into {constants.SYSINFO_F}")
+
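+ # The two collect_config tests are smoke tests: they only verify the call
+ # succeeds with the cluster service active and inactive respectively.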
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.dump_configurations')
+ @mock.patch('crmsh.report.collect.consume_cib_in_workdir')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.dump_runtime_state')
+ @mock.patch('crmsh.report.collect.ServiceManager')
+ def test_collect_config_running(self, mock_service, mock_dump_state, mock_write, mock_debug2, mock_cib, mock_dump_config, mock_real_path):
+ mock_real_path.return_value = "workdir"
+ mock_service_inst = mock.Mock()
+ mock_service.return_value = mock_service_inst
+ mock_service_inst.service_is_active.return_value = True
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ collect.collect_config(mock_ctx_inst)
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.dump_configurations')
+ @mock.patch('crmsh.report.collect.consume_cib_in_workdir')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('shutil.copy2')
+ @mock.patch('crmsh.report.collect.ServiceManager')
+ def test_collect_config_stopped(self, mock_service, mock_copy2, mock_write, mock_debug2, mock_cib, mock_dump_config, mock_real_path):
+ mock_real_path.return_value = "workdir"
+ mock_service_inst = mock.Mock()
+ mock_service.return_value = mock_service_inst
+ mock_service_inst.service_is_active.return_value = False
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir", cib_dir="/var/log/pacemaker/cib")
+ collect.collect_config(mock_ctx_inst)
+
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.sh.cluster_shell')
+ @mock.patch('os.path.isfile')
+ def test_consume_cib_in_workdir(self, mock_isfile, mock_run, mock_str2file):
+ mock_isfile.return_value = True
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error.side_effect = ["data1", "data2"]
+ collect.consume_cib_in_workdir("/workdir")
+ mock_isfile.assert_called_once_with(f"/workdir/{constants.CIB_F}")
+ mock_run_inst.get_stdout_or_raise_error.assert_has_calls([
+ mock.call('CIB_file=/workdir/cib.xml crm configure show'),
+ mock.call('crm_verify -V -x /workdir/cib.xml')
+ ])
+ mock_str2file.assert_has_calls([
+ mock.call("data1", f"/workdir/{constants.CONFIGURE_SHOW_F}"),
+ mock.call("data2", f"/workdir/{constants.CRM_VERIFY_F}")
+ ])
+
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.report.collect.sh.cluster_shell')
+ def test_collect_ratraces_return(self, mock_run, mock_logger):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_rc_stdout_stderr_without_input.return_value = (0, "data", None)
+ mock_ctx_inst = mock.Mock(node_list=["node1"])
+ collect.collect_ratraces(mock_ctx_inst)
+ mock_logger.debug2.assert_not_called()
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('shutil.copy2')
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.report.utils.find_files_in_timespan')
+ @mock.patch('crmsh.report.collect.sh.cluster_shell')
+ def test_collect_ratraces(self, mock_run, mock_find, mock_mkdirp, mock_copy, mock_logger, mock_real_path):
+ mock_real_path.return_value = "/var/log"
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ data = "INFO: Trace for .* is written to /var/log/cluster/pacemaker.log"
+ mock_run_inst.get_rc_stdout_stderr_without_input.return_value = (0, data, None)
+ mock_ctx_inst = mock.Mock(node_list=["node1"], work_dir="/opt/work")
+ mock_find.return_value = ["/var/log/cluster"]
+
+ collect.collect_ratraces(mock_ctx_inst)
+
+ mock_logger.debug2.assert_called_once_with('Looking for RA trace files in "%s"', '/var/log/cluster')
+ mock_logger.debug.assert_called_once_with(f'Dump RA trace files into {mock_real_path.return_value}')
+
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_lsof_ocfs2_device(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mount_data = """
+/dev/vda3 on /home type xfs (rw,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota)
+tmpfs on /run/user/0 type tmpfs (rw,nosuid,nodev,relatime,size=169544k,nr_inodes=42386,mode=700,inode64)
+/dev/sda7 on /srv/clusterfs type ocfs2 (rw,relatime,heartbeat=non
+ """
+ mock_run_inst.get_stdout_stderr.side_effect = [(0, mount_data, None), (0, "data", None)]
+ res = collect.lsof_ocfs2_device()
+ self.assertEqual(res, "\n\n#=====[ Command ] ==========================#\n# lsof /dev/sda7\ndata")
+ mock_run_inst.get_stdout_stderr.assert_has_calls([
+ mock.call("mount"),
+ mock.call("lsof /dev/sda7")
+ ])
+
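+ # ocfs2_commands_output() probes helper binaries with shutil.which; here
+ # only the last two probes succeed and just the `mount` output is returned.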
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ @mock.patch('os.path.exists')
+ @mock.patch('shutil.which')
+ def test_ocfs2_commands_output(self, mock_which, mock_exists, mock_run):
+ mock_which.side_effect = [False] * 5 + [True, True]
+ mock_exists.return_value = False
+ mock_run.return_value = "data"
+ res = collect.ocfs2_commands_output()
+ self.assertEqual(res, "\n\n#===== [ Command ] ==========================#\n# mount\ndata")
+
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_collect_ocfs2_info_error(self, mock_run, mock_str2file, mock_debug2):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (1, None, "error")
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ collect.collect_ocfs2_info(mock_ctx_inst)
+ mock_str2file.assert_called_once_with('Failed to run "mounted.ocfs2 -d": error', '/opt/workdir/ocfs2.txt')
+
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_collect_ocfs2_info_no_found(self, mock_run, mock_str2file, mock_debug2):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "data", None)
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ collect.collect_ocfs2_info(mock_ctx_inst)
+ mock_str2file.assert_called_once_with('No ocfs2 partitions found', '/opt/workdir/ocfs2.txt')
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.ocfs2_commands_output')
+ @mock.patch('crmsh.report.collect.lsof_ocfs2_device')
+ @mock.patch('crmsh.report.collect.dump_D_process')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_collect_ocfs2_info(self, mock_run, mock_str2file, mock_debug2, mock_D, mock_lsof, mock_output, mock_real_path):
+ mock_real_path.return_value = constants.OCFS2_F
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "line1\nline2", None)
+ mock_D.return_value = "data_D\n"
+ mock_lsof.return_value = "data_lsof\n"
+ mock_output.return_value = "data_output\n"
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ collect.collect_ocfs2_info(mock_ctx_inst)
+ mock_str2file.assert_called_once_with('data_D\ndata_lsof\ndata_output\n', '/opt/workdir/ocfs2.txt')
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ @mock.patch('shutil.which')
+ def test_collect_dlm_info(self, mock_which, mock_get_output, mock_str2file, mock_debug, mock_real_path):
+ mock_real_path.return_value = constants.DLM_DUMP_F
+ mock_which.return_value = True
+ ls_data = """
+dlm lockspaces
+name 08BB5A6A38EE491DBF63627EEB57E558
+id 0x19041a12
+ """
+ mock_get_output.side_effect = [ls_data, "lockdebug data", "dump data"]
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir")
+ collect.collect_dlm_info(mock_ctx_inst)
+ mock_debug.assert_called_once_with(f"Dump DLM information into {constants.DLM_DUMP_F}")
+
+ @mock.patch('crmsh.report.collect.dump_core_info')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.utils.find_files_in_timespan')
+ def test_collect_coredump_info(self, mock_find, mock_basename, mock_warning, mock_dump):
+ mock_ctx_inst = mock.Mock(cores_dir_list=['/var/lib/pacemaker/cores'], work_dir="/opt/work_dir")
+ mock_find.return_value = ["/var/lib/pacemaker/cores/core.1"]
+ mock_basename.return_value = "core.1"
+ collect.collect_coredump_info(mock_ctx_inst)
+ mock_dump.assert_called_once_with("/opt/work_dir", mock_find.return_value)
+ mock_warning.assert_called_once_with(f"Found coredump file: {mock_find.return_value}")
+
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_find_binary_path_for_core_not_found(self, mock_run):
+ mock_run().get_stdout_stderr.return_value = (0, "Core not found", None)
+ res = collect.find_binary_path_for_core("core.1")
+ self.assertEqual("Cannot find the program path for core core.1", res)
+
+ @mock.patch('crmsh.report.collect.ShellUtils')
+ def test_find_binary_path_for_core(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "Core was generated by `/usr/sbin/crm_mon'", None)
+ res = collect.find_binary_path_for_core("core.1")
+ self.assertEqual("Core core.1 was generated by /usr/sbin/crm_mon", res)
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('shutil.which')
+ def test_dump_core_info_no_gdb(self, mock_which, mock_str2file, mock_logger, mock_real_path):
+ mock_real_path.return_value = constants.COREDUMP_F
+ mock_which.return_value = False
+ collect.dump_core_info("/opt/workdir", ["core.1"])
+ mock_logger.warning.assert_called_once_with("Please install gdb to get more info for coredump files")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.find_binary_path_for_core')
+ @mock.patch('shutil.which')
+ def test_dump_core_info(self, mock_which, mock_find_binary, mock_str2file, mock_debug2, mock_real_path):
+ mock_real_path.return_value = constants.COREDUMP_F
+ mock_which.return_value = True
+ mock_find_binary.return_value = "data"
+ collect.dump_core_info("/opt/workdir", ["core.1"])
+ mock_str2file.assert_called_once_with("data\n\nPlease utilize the gdb and debuginfo packages to obtain more detailed information locally", f"/opt/workdir/{constants.COREDUMP_F}")
+ mock_debug2(f"Dump coredump info into {constants.COREDUMP_F}")
+
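+ # Three directories, three outcomes: a missing path, correct uid/gid/mode,
+ # and an ownership mismatch, matching the summary string asserted below.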
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('pwd.getpwnam')
+ @mock.patch('os.stat')
+ @mock.patch('os.path.isdir')
+ def test_collect_perms_state(self, mock_isdir, mock_stat, mock_getpwnam, mock_str2file):
+ mock_ctx_inst = mock.Mock(
+ pcmk_lib_dir="/var/lib/pacemaker",
+ pe_dir="/var/lib/pacemaker/pe",
+ cib_dir="/var/lib/pacemaker/cib",
+ work_dir="/opt/work_dir"
+ )
+ mock_isdir.side_effect = [False, True, True]
+ mock_stat_inst_pe = mock.Mock(st_uid=1000, st_gid=1000, st_mode=0o750)
+ mock_stat_inst_cib = mock.Mock(st_uid=1000, st_gid=1000, st_mode=0o750)
+ mock_stat.side_effect = [mock_stat_inst_pe, mock_stat_inst_cib]
+ mock_getpwnam_inst_pe = mock.Mock(pw_uid=1000, pw_gid=1000)
+ mock_getpwnam_inst_cib = mock.Mock(pw_uid=1001, pw_gid=1000)
+ mock_getpwnam.side_effect = [mock_getpwnam_inst_pe, mock_getpwnam_inst_cib]
+
+ collect.collect_perms_state(mock_ctx_inst)
+
+ data = "##### Check perms for /var/lib/pacemaker: /var/lib/pacemaker is not a directory or does not exist\n##### Check perms for /var/lib/pacemaker/pe: OK\n##### Check perms for /var/lib/pacemaker/cib: Permissions or ownership for /var/lib/pacemaker/cib are incorrect\n"
+ mock_str2file.assert_called_once_with(data, f"/opt/work_dir/{constants.PERMISSIONS_F}")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.utils.get_dc')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.collect.sh.cluster_shell')
+ def test_dump_runtime_state(self, mock_run, mock_str2file, mock_debug, mock_get_dc, mock_this_node, mock_real_path):
+ mock_real_path.side_effect = [
+ constants.CRM_MON_F,
+ constants.CIB_F,
+ constants.MEMBERSHIP_F,
+ "workdir"
+ ]
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error.side_effect = ["crm_mon_data", "cib_data", "crm_node_data"]
+ mock_get_dc.return_value = "node1"
+ mock_this_node.return_value = "node1"
+ collect.dump_runtime_state("/opt/workdir")
+ mock_debug.assert_has_calls([
+ mock.call(f"Dump cluster state into {constants.CRM_MON_F}"),
+ mock.call(f"Dump CIB contents into {constants.CIB_F}"),
+ mock.call(f"Dump members of this partition into {constants.MEMBERSHIP_F}"),
+ mock.call(f"Current DC is node1; Touch file 'DC' in workdir")
+ ])
+
+ @mock.patch('shutil.copytree')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isdir')
+ @mock.patch('shutil.copy2')
+ @mock.patch('os.path.isfile')
+ @mock.patch('crmsh.corosync.conf')
+ def test_dump_configurations(self, mock_corosync_conf, mock_isfile, mock_copy2, mock_isdir, mock_basename, mock_copytree):
+ mock_corosync_conf.return_value = "/etc/corosync/corosync.conf"
+ mock_isfile.side_effect = [True, True, False, True]
+ mock_isdir.return_value = True
+ mock_basename.return_value = "drbd.d"
+ collect.dump_configurations("/opt/workdir")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.utils.get_cmd_output')
+ @mock.patch('crmsh.report.utils.find_files_in_timespan')
+ def test_collect_corosync_blackbox(self, mock_find_files, mock_get_cmd_output, mock_str2file, mock_debug, mock_real_path):
+ mock_real_path.return_value = constants.COROSYNC_RECORDER_F
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ mock_find_files.return_value = ["/var/lib/corosync/fdata.1"]
+ mock_get_cmd_output.return_value = "data"
+ collect.collect_corosync_blackbox(mock_ctx_inst)
+ mock_debug.assert_called_once_with(f"Dump corosync blackbox info into {constants.COROSYNC_RECORDER_F}")
diff --git a/test/unittests/test_report_core.py b/test/unittests/test_report_core.py
new file mode 100644
index 0000000..dd6e842
--- /dev/null
+++ b/test/unittests/test_report_core.py
@@ -0,0 +1,555 @@
+from crmsh import config
+from crmsh.report import core, constants, utils, collect
+import crmsh.log
+
+import sys
+import argparse
+import unittest
+from unittest import mock
+
+
+class TestCapitalizedHelpFormatter(unittest.TestCase):
+ def setUp(self):
+ # Initialize the ArgumentParser with the CapitalizedHelpFormatter
+ self.parser = argparse.ArgumentParser(
+ formatter_class=core.CapitalizedHelpFormatter,
+ usage="usage: test"
+ )
+ self.parser.add_argument('--test', help='Test option')
+
+ def test_usage(self):
+ # Test that the usage is capitalized
+ usage_text = self.parser.format_usage()
+ self.assertTrue(usage_text.startswith('Usage: '))
+
+ def test_section_heading(self):
+ # Test that section headings are capitalized
+ section_text = self.parser.format_help()
+ self.assertTrue('Option' in section_text)
+
+
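+# Context.load() runs once in setUp with config and time parsing mocked out;
+# the tests below exercise attribute and item access on the result.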
+class TestContext(unittest.TestCase):
+
+ @mock.patch('crmsh.report.utils.parse_to_timestamp')
+ @mock.patch('crmsh.report.utils.now')
+ @mock.patch('crmsh.report.core.config')
+ def setUp(self, mock_config, mock_now, mock_parse_to_timestamp):
+ mock_config.report = mock.Mock(
+ from_time="20230101",
+ compress=False,
+ collect_extra_logs="file1 file2",
+ remove_exist_dest=False,
+ single_node=False
+ )
+ mock_now.return_value = "12345"
+ mock_parse_to_timestamp.return_value = "54321"
+ self.context = core.Context()
+ self.context.load()
+
+ def test_attribute_setting(self):
+ self.context.name = "value"
+ self.assertEqual(self.context.name, "value")
+ self.context["age"] = 19
+ self.assertEqual(self.context.age, 19)
+ self.context.extra_log_list = ["file3", "file2"]
+ self.assertEqual(len(self.context.extra_log_list), 3)
+
+ @mock.patch('json.dumps')
+ def test_str(self, mock_dumps):
+ mock_dumps.return_value = "json str"
+ self.assertEqual(self.context.name, "crm_report")
+ self.assertEqual(self.context.from_time, "54321")
+ self.assertEqual(str(self.context), "json str")
+
+
+class TestRun(unittest.TestCase):
+
+ @mock.patch('os.path.isdir')
+ def test_process_dest_dest_not_exist(self, mock_isdir):
+ mock_isdir.return_value = False
+ mock_ctx_inst = mock.Mock(dest="/opt/test/report")
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.process_dest(mock_ctx_inst)
+ self.assertEqual("Directory /opt/test does not exist", str(err.exception))
+
+ @mock.patch('crmsh.utils.is_filename_sane')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isdir')
+ def test_process_dest_filename_not_sane(self, mock_isdir, mock_basename, mock_sane):
+ mock_isdir.return_value = True
+ mock_sane.return_value = False
+ mock_basename.return_value = "report*"
+ mock_ctx_inst = mock.Mock(dest="/opt/test/report*")
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.process_dest(mock_ctx_inst)
+ self.assertEqual("report* is invalid file name", str(err.exception))
+
+ @mock.patch('crmsh.report.core.pick_compress_prog')
+ @mock.patch('shutil.rmtree')
+ @mock.patch('crmsh.utils.is_filename_sane')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isdir')
+ def test_process_dest_dir_exists_rmtree(self, mock_isdir, mock_basename, mock_sane, mock_rmtree, mock_pick):
+ mock_isdir.side_effect = [True, True]
+ mock_sane.return_value = True
+ mock_basename.return_value = "report"
+ mock_ctx_inst = mock.Mock(dest="/opt/test/report", no_compress=True, rm_exist_dest=True)
+ core.process_dest(mock_ctx_inst)
+ mock_rmtree.assert_called_once_with("/opt/test/report")
+
+ @mock.patch('crmsh.report.core.pick_compress_prog')
+ @mock.patch('crmsh.utils.is_filename_sane')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isdir')
+ def test_process_dest_dir_exists(self, mock_isdir, mock_basename, mock_sane, mock_pick):
+ mock_isdir.side_effect = [True, True]
+ mock_sane.return_value = True
+ mock_basename.return_value = "report"
+ mock_ctx_inst = mock.Mock(dest="/opt/test/report", no_compress=True, rm_exist_dest=False)
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.process_dest(mock_ctx_inst)
+ self.assertEqual("Destination directory /opt/test/report exists, please cleanup or use -Z option", str(err.exception))
+
+ @mock.patch('crmsh.report.core.pick_compress_prog')
+ @mock.patch('crmsh.utils.is_filename_sane')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isdir')
+ @mock.patch('crmsh.report.utils.now')
+ def test_process_dest(self, mock_now, mock_isdir, mock_basename, mock_is_sane, mock_pick):
+ mock_now.return_value = "Mon-28-Aug-2023"
+ mock_isdir.side_effect = [True, False]
+ mock_is_sane.return_value = True
+ mock_basename.return_value = f"report.{mock_now.return_value}"
+ mock_ctx_inst = mock.Mock(dest=None, no_compress=False, compress_suffix=".bz2", name="report")
+
+ core.process_dest(mock_ctx_inst)
+
+ self.assertEqual(mock_ctx_inst.dest_dir, ".")
+ mock_is_sane.assert_called_once_with(mock_basename.return_value)
+ self.assertEqual(mock_ctx_inst.dest_path, "./report.Mon-28-Aug-2023.tar.bz2")
+
+ @mock.patch('crmsh.report.core.pick_first_compress')
+ def test_pick_compress_prog(self, mock_pick):
+ mock_pick.return_value = (None, None)
+ mock_ctx_inst = mock.Mock()
+ core.pick_compress_prog(mock_ctx_inst)
+ self.assertEqual(mock_ctx_inst.compress_prog, "cat")
+
+ @mock.patch('shutil.which')
+ def test_pick_first_compress_return(self, mock_which):
+ mock_which.return_value = True
+ prog, ext = core.pick_first_compress()
+ self.assertEqual(prog, "gzip")
+ self.assertEqual(ext, ".gz")
+ mock_which.assert_called_once_with("gzip")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('shutil.which')
+ def test_pick_first_compress(self, mock_which, mock_warn):
+ mock_which.side_effect = [False, False, False]
+ prog, ext = core.pick_first_compress()
+ self.assertIsNone(prog)
+ self.assertIsNone(ext)
+
+ @mock.patch('crmsh.report.utils.get_timespan_str')
+ @mock.patch('logging.Logger.info')
+ def test_finalword(self, mock_info, mock_get_timespan):
+ mock_ctx_inst = mock.Mock(dest_path="./crm_report-Tue-15-Aug-2023.tar.bz2", node_list=["node1", "node2"])
+ mock_get_timespan.return_value = "2023-08-14 18:17 - 2023-08-15 06:17"
+ core.finalword(mock_ctx_inst)
+ mock_info.assert_has_calls([
+ mock.call(f"The report is saved in {mock_ctx_inst.dest_path}"),
+ mock.call(f"Report timespan: {mock_get_timespan.return_value}"),
+ mock.call(f"Including nodes: {' '.join(mock_ctx_inst.node_list)}"),
+ mock.call("Thank you for taking time to create this report")
+ ])
+
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.report.core.is_collector')
+ @mock.patch('crmsh.report.core.tmpfiles.create_dir')
+ def test_setup_workdir_collector(self, mock_create_dir, mock_collector, mock_mkdirp, mock_logger, mock_basename):
+ mock_create_dir.return_value = "/tmp/tmp_dir"
+ mock_ctx_inst = mock.Mock(dest="/opt/report", work_dir="/opt/work_dir", me="node1")
+ mock_collector.return_value = True
+ mock_basename.return_value = "report"
+ core.setup_workdir(mock_ctx_inst)
+ mock_logger.debug2.assert_called_once_with(f"Setup work directory in {mock_ctx_inst.work_dir}")
+
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.report.core.is_collector')
+ @mock.patch('crmsh.report.core.tmpfiles.create_dir')
+ def test_setup_workdir(self, mock_create_dir, mock_collector, mock_mkdirp, mock_logger, mock_basename):
+ mock_create_dir.return_value = "/tmp/tmp_dir"
+ mock_ctx_inst = mock.Mock(dest="/opt/report", work_dir="/opt/work_dir")
+ mock_collector.return_value = False
+ mock_basename.return_value = "report"
+ core.setup_workdir(mock_ctx_inst)
+ mock_logger.debug2.assert_called_once_with(f"Setup work directory in {mock_ctx_inst.work_dir}")
+
+ @mock.patch('os.path.isdir')
+ @mock.patch('crmsh.report.core.load_from_crmsh_config')
+ def test_load_context_attributes(self, mock_load, mock_isdir):
+ mock_ctx_inst = mock.Mock(cib_dir="/var/lib/pacemaker/cib")
+ mock_isdir.return_value = True
+
+ core.load_context_attributes(mock_ctx_inst)
+
+ self.assertEqual(mock_ctx_inst.pcmk_lib_dir, "/var/lib/pacemaker")
+ self.assertEqual(mock_ctx_inst.cores_dir_list, ["/var/lib/pacemaker/cores", constants.COROSYNC_LIB])
+
+ @mock.patch('os.path.isdir')
+ @mock.patch('crmsh.report.core.config')
+ def test_load_from_crmsh_config(self, mock_config, mock_isdir):
+ mock_config.path = mock.Mock(
+ crm_config="/var/lib/pacemaker/cib",
+ crm_daemon_dir="/usr/lib/pacemaker",
+ pe_state_dir="/var/lib/pacemaker/pe"
+ )
+ mock_isdir.side_effect = [True, True, True]
+ mock_ctx_inst = mock.Mock()
+
+ core.load_from_crmsh_config(mock_ctx_inst)
+
+ self.assertEqual(mock_ctx_inst.cib_dir, mock_config.path.crm_config)
+ self.assertEqual(mock_ctx_inst.pcmk_exec_dir, mock_config.path.crm_daemon_dir)
+ self.assertEqual(mock_ctx_inst.pe_dir, mock_config.path.pe_state_dir)
+
+ @mock.patch('os.path.isdir')
+ @mock.patch('crmsh.report.core.config')
+ def test_load_from_crmsh_config_exception(self, mock_config, mock_isdir):
+ mock_config.path = mock.Mock(
+ crm_config="/var/lib/pacemaker/cib",
+ )
+ mock_isdir.return_value = False
+ mock_ctx_inst = mock.Mock()
+
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.load_from_crmsh_config(mock_ctx_inst)
+ self.assertEqual(f"Cannot find CIB directory", str(err.exception))
+
+ def test_adjust_verbosity_debug(self):
+ mock_ctx_inst = mock.Mock(debug=1)
+ core.adjust_verbosity(mock_ctx_inst)
+
+ def test_adjust_verbosity(self):
+ mock_ctx_inst = mock.Mock(debug=0)
+ config.core.debug = True
+ core.adjust_verbosity(mock_ctx_inst)
+
+ @mock.patch('crmsh.report.core.adjust_verbosity')
+ @mock.patch('crmsh.report.core.config')
+ @mock.patch('json.loads')
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_load_context(self, mock_logger, mock_json_loads, mock_config, mock_verbosity):
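+ # Minimal stand-in context: load_context() assigns the parsed keys through
+ # item access, which a plain mock.Mock does not support.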
+ class Context:
+ def __str__(self):
+ return "data"
+ def __setitem__(self, key, value):
+ self.__dict__[key] = value
+
+ sys.argv = ["arg1", "arg2", "arg3"]
+ mock_config.report = mock.Mock(verbosity=None)
+ mock_json_loads.return_value = {"key": "value", "debug": "true"}
+ mock_ctx_inst = Context()
+ core.load_context(mock_ctx_inst)
+ mock_logger.debug2.assert_called_once_with("Loading context from collector: data")
+
+ @mock.patch('crmsh.report.core.adjust_verbosity')
+ @mock.patch('crmsh.report.core.process_arguments')
+ @mock.patch('crmsh.utils.check_empty_option_value')
+ @mock.patch('crmsh.report.core.add_arguments')
+ def test_parse_arguments(self, mock_parse, mock_check_space, mock_process, mock_verbosity):
+ mock_args = mock.Mock(option1="value1")
+ mock_parse.return_value = mock_args
+ mock_ctx_inst = mock.Mock()
+
+ core.parse_arguments(mock_ctx_inst)
+ self.assertEqual(mock_ctx_inst.option1, "value1")
+
+ mock_check_space.assert_called_once_with(mock_args)
+ mock_process.assert_called_once_with(mock_ctx_inst)
+
+ def test_is_collector(self):
+ sys.argv = ["report", "__collector"]
+ self.assertEqual(core.is_collector(), True)
+
+ @mock.patch('crmsh.report.core.push_data')
+ @mock.patch('crmsh.report.core.collect_logs_and_info')
+ @mock.patch('crmsh.report.core.setup_workdir')
+ @mock.patch('crmsh.report.core.load_context')
+ @mock.patch('crmsh.report.core.is_collector')
+ @mock.patch('crmsh.report.core.Context')
+ def test_run_impl_collector(self, mock_context, mock_collector, mock_load, mock_setup, mock_collect_info, mock_push):
+ mock_context.return_value = mock.Mock()
+ mock_ctx_inst = mock_context.return_value
+ mock_collector.side_effect = [True, True]
+
+ core.run_impl()
+
+ mock_context.assert_called_once_with()
+ mock_collector.assert_has_calls([mock.call(), mock.call()])
+ mock_load.assert_called_once_with(mock_ctx_inst)
+ mock_setup.assert_called_once_with(mock_ctx_inst)
+ mock_collect_info.assert_called_once_with(mock_ctx_inst)
+ mock_push.assert_called_once_with(mock_ctx_inst)
+
+ @mock.patch('crmsh.report.core.process_results')
+ @mock.patch('crmsh.report.core.collect_for_nodes')
+ @mock.patch('crmsh.report.core.find_ssh_user')
+ @mock.patch('crmsh.report.core.setup_workdir')
+ @mock.patch('crmsh.report.core.load_context_attributes')
+ @mock.patch('crmsh.report.core.parse_arguments')
+ @mock.patch('crmsh.report.core.is_collector')
+ @mock.patch('crmsh.report.core.Context')
+ def test_run_impl(self, mock_context, mock_collector, mock_parse, mock_load, mock_setup, mock_find_ssh, mock_collect, mock_process_results):
+ mock_context.return_value = mock.Mock()
+ mock_ctx_inst = mock_context.return_value
+ mock_collector.side_effect = [False, False]
+
+ core.run_impl()
+
+ mock_context.assert_called_once_with()
+ mock_collector.assert_has_calls([mock.call(), mock.call()])
+ mock_parse.assert_called_once_with(mock_ctx_inst)
+ mock_load.assert_called_once_with(mock_ctx_inst)
+ mock_setup.assert_called_once_with(mock_ctx_inst)
+ mock_find_ssh.assert_called_once_with(mock_ctx_inst)
+ mock_collect.assert_called_once_with(mock_ctx_inst)
+ mock_process_results.assert_called_once_with(mock_ctx_inst)
+
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.report.core.run_impl')
+ def test_run_exception_generic(self, mock_run, mock_log_error):
+ mock_run.side_effect = utils.ReportGenericError("error")
+ with self.assertRaises(SystemExit) as err:
+ core.run()
+ mock_log_error.assert_called_once_with("error")
+
+ @mock.patch('crmsh.report.utils.print_traceback')
+ @mock.patch('crmsh.report.core.run_impl')
+ def test_run_exception(self, mock_run, mock_print):
+ mock_run.side_effect = UnicodeDecodeError("encoding", b'', 0, 1, "error")
+ with self.assertRaises(SystemExit) as err:
+ core.run()
+ mock_print.assert_called_once_with()
+
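+ # add_arguments() apparently prints usage and exits when the parsed args
+ # request help; with help=False it falls through, the debug flag feeding
+ # the config.report verbosity handling instead.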
+ @mock.patch('argparse.HelpFormatter')
+ @mock.patch('argparse.ArgumentParser')
+ def test_add_arguments_help(self, mock_argparse, mock_formatter):
+ mock_argparse_inst = mock.Mock()
+ mock_argparse.return_value = mock_argparse_inst
+ mock_args_inst = mock.Mock(help=True)
+ mock_argparse_inst.parse_args.return_value = mock_args_inst
+
+ with self.assertRaises(SystemExit):
+ core.add_arguments()
+
+ mock_argparse_inst.print_help.assert_called_once_with()
+
+ @mock.patch('crmsh.report.core.config')
+ @mock.patch('argparse.HelpFormatter')
+ @mock.patch('argparse.ArgumentParser')
+ def test_add_arguments(self, mock_argparse, mock_formatter, mock_config):
+ mock_argparse_inst = mock.Mock()
+ mock_argparse.return_value = mock_argparse_inst
+ mock_args_inst = mock.Mock(help=False, debug=True)
+ mock_argparse_inst.parse_args.return_value = mock_args_inst
+ mock_config.report = mock.Mock(verbosity=False)
+
+ core.add_arguments()
+
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.to_ascii')
+ @mock.patch('crmsh.report.core.ShellUtils')
+ def test_push_data(self, mock_sh_utils, mock_to_ascii, mock_logger):
+ mock_sh_utils_inst = mock.Mock()
+ mock_sh_utils.return_value = mock_sh_utils_inst
+ mock_sh_utils_inst.get_stdout_stderr.return_value = (0, "data", "error")
+ mock_to_ascii.return_value = "error"
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir", main_node="node1", me="node1")
+
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.push_data(mock_ctx_inst)
+ self.assertEqual("error", str(err.exception))
+
+ mock_logger.debug2.assert_called_once_with("Pushing data from node1:/opt/work_dir to node1")
+ mock_sh_utils_inst.get_stdout_stderr.assert_called_once_with("cd /opt/work_dir/.. && tar -h -c node1", raw=True)
+
+ @mock.patch('crmsh.report.core.finalword')
+ @mock.patch('shutil.move')
+ @mock.patch('crmsh.report.utils.create_description_template')
+ @mock.patch('crmsh.report.utils.analyze')
+ def test_process_results_no_compress(self, mock_analyze, mock_create, mock_move, mock_final):
+ mock_ctx_inst = mock.Mock(speed_up=True, work_dir="/opt/work_dir", dest_dir="/opt/user", no_compress=True)
+ core.process_results(mock_ctx_inst)
+ mock_analyze.assert_called_once_with(mock_ctx_inst)
+ mock_create.assert_called_once_with(mock_ctx_inst)
+ mock_final.assert_called_once_with(mock_ctx_inst)
+ mock_move.assert_called_once_with(mock_ctx_inst.work_dir, mock_ctx_inst.dest_dir)
+
+ @mock.patch('crmsh.report.core.finalword')
+ @mock.patch('crmsh.report.core.sh.cluster_shell')
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.report.utils.create_description_template')
+ @mock.patch('crmsh.report.utils.analyze')
+ @mock.patch('crmsh.report.utils.do_sanitize')
+ def test_process_results(self, mock_sanitize, mock_analyze, mock_create, mock_debug2, mock_run, mock_final):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error = mock.Mock()
+ mock_ctx_inst = mock.Mock(speed_up=False, work_dir="/opt/work_dir", dest_dir="/opt/user", no_compress=False, dest="report", compress_prog="tar", compress_suffix=".bz2")
+ core.process_results(mock_ctx_inst)
+ mock_sanitize.assert_called_once_with(mock_ctx_inst)
+ mock_analyze.assert_called_once_with(mock_ctx_inst)
+ mock_create.assert_called_once_with(mock_ctx_inst)
+ mock_final.assert_called_once_with(mock_ctx_inst)
+
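+ # collect_logs_and_info() fans the collect_* functions out over a
+ # multiprocessing Pool; with cpu_count() mocked to 4, the assertion below
+ # implies the pool is sized to cpu_count - 1 workers.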
+ @mock.patch('crmsh.report.utils.print_traceback')
+ @mock.patch('crmsh.report.core.getmembers')
+ @mock.patch('multiprocessing.cpu_count')
+ @mock.patch('multiprocessing.Pool')
+ def test_collect_logs_and_info(self, mock_pool, mock_cpu_count, mock_getmember, mock_print):
+ mock_cpu_count.return_value = 4
+ mock_pool_inst = mock.Mock()
+ mock_pool.return_value = mock_pool_inst
+ mock_pool_inst.apply_async = mock.Mock()
+ mock_async_inst1 = mock.Mock()
+ mock_async_inst2 = mock.Mock()
+ mock_pool_inst.apply_async.side_effect = [mock_async_inst1, mock_async_inst2]
+ mock_async_inst1.get = mock.Mock()
+ mock_async_inst2.get = mock.Mock(side_effect=ValueError)
+ mock_pool_inst.close = mock.Mock()
+ mock_pool_inst.join = mock.Mock()
+ mock_getmember.return_value = [("collect_func1", None), ("collect_func2", None)]
+ collect.collect_func1 = mock.Mock()
+ collect.collect_func2 = mock.Mock()
+ mock_ctx_inst = mock.Mock()
+
+ core.collect_logs_and_info(mock_ctx_inst)
+ mock_pool.assert_called_once_with(3)
+
+ @mock.patch('multiprocessing.Process')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.report.core.start_collector')
+ def test_collect_for_nodes(self, mock_start_collector, mock_info, mock_process):
+ mock_ctx_inst = mock.Mock(
+ node_list=["node1", "node2"],
+ ssh_askpw_node_list=["node2"],
+ ssh_user=""
+ )
+ mock_process_inst = mock.Mock()
+ mock_process.return_value = mock_process_inst
+ core.collect_for_nodes(mock_ctx_inst)
+
+ def test_process_arguments_value_error(self):
+ mock_ctx_inst = mock.Mock(from_time=123, to_time=100)
+ with self.assertRaises(ValueError) as err:
+ core.process_arguments(mock_ctx_inst)
+ self.assertEqual("The start time must be before the finish time", str(err.exception))
+
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ def test_process_node_list_exception(self, mock_list_nodes):
+ mock_ctx_inst = mock.Mock(node_list=[])
+ mock_list_nodes.return_value = []
+ with self.assertRaises(utils.ReportGenericError) as err:
+ core.process_node_list(mock_ctx_inst)
+ self.assertEqual("Could not figure out a list of nodes; is this a cluster node?", str(err.exception))
+
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ def test_process_node_list_single(self, mock_list_nodes):
+ mock_ctx_inst = mock.Mock(node_list=["node1", "node2"], single=True, me="node1")
+ core.process_node_list(mock_ctx_inst)
+
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.utils.ping_node')
+ @mock.patch('crmsh.utils.list_cluster_nodes')
+ def test_process_node_list(self, mock_list_nodes, mock_ping, mock_error):
+ mock_ctx_inst = mock.Mock(node_list=["node1", "node2"], single=False, me="node1")
+ mock_ping.side_effect = ValueError("error")
+ core.process_node_list(mock_ctx_inst)
+ self.assertEqual(mock_ctx_inst.node_list, ["node1"])
+
+ @mock.patch('crmsh.report.core.process_node_list')
+ @mock.patch('crmsh.report.core.process_dest')
+ def test_process_arguments(self, mock_dest, mock_node_list):
+ mock_ctx_inst = mock.Mock(from_time=123, to_time=150)
+ core.process_arguments(mock_ctx_inst)
+
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ @mock.patch('crmsh.report.core.userdir.getuser')
+ @mock.patch('crmsh.report.core.userdir.get_sudoer')
+ def test_find_ssh_user_not_found(self, mock_get_sudoer, mock_getuser, mock_check_ssh, mock_logger):
+ mock_get_sudoer.return_value = ""
+ mock_getuser.return_value = "user2"
+ mock_check_ssh.return_value = True
+ mock_ctx_inst = mock.Mock(ssh_user="", ssh_askpw_node_list=[], node_list=["node1", "node2"], me="node1")
+ core.find_ssh_user(mock_ctx_inst)
+ mock_logger.warning.assert_called_once_with("passwordless ssh to node(s) ['node2'] does not work")
+
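+ # When a sudoer (user1) is known and passwordless ssh succeeds as that
+ # user, find_ssh_user() records it as ctx.ssh_user and enables sudo, as
+ # the two assertions below verify.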
+ @mock.patch('crmsh.report.core.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.check_ssh_passwd_need')
+ @mock.patch('crmsh.utils.this_node')
+ @mock.patch('crmsh.report.core.userdir.getuser')
+ @mock.patch('crmsh.report.core.userdir.get_sudoer')
+ def test_find_ssh_user(self, mock_get_sudoer, mock_getuser, mock_this_node, mock_check_ssh, mock_debug, mock_warn, mock_debug2):
+ mock_get_sudoer.return_value = "user1"
+ mock_getuser.return_value = "user2"
+ mock_this_node.return_value = "node1"
+ mock_check_ssh.return_value = False
+ mock_ctx_inst = mock.Mock(ssh_user="", ssh_askpw_node_list=[], node_list=["node1", "node2"])
+ core.find_ssh_user(mock_ctx_inst)
+ self.assertEqual("sudo", mock_ctx_inst.sudo)
+ self.assertEqual("user1", mock_ctx_inst.ssh_user)
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.report.core.ShellUtils')
+ def test_start_collector_return(self, mock_sh_utils, mock_warn):
+ mock_sh_utils_inst = mock.Mock()
+ mock_sh_utils.return_value = mock_sh_utils_inst
+ mock_sh_utils_inst.get_stdout_stderr.return_value = (0, '', None)
+ mock_ctx_inst = mock.Mock(me="node1")
+ core.start_collector("node1", mock_ctx_inst)
+ mock_sh_utils_inst.get_stdout_stderr.assert_called_once_with(f"{constants.BIN_COLLECTOR} '{mock_ctx_inst}'")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.report.core.ShellUtils')
+ @mock.patch('crmsh.report.core.sh.LocalShell')
+ @mock.patch('crmsh.utils.this_node')
+ def test_start_collector_warn(self, mock_this_node, mock_sh, mock_sh_utils, mock_warn):
+ mock_sh_utils_inst = mock.Mock()
+ mock_sh_utils.return_value = mock_sh_utils_inst
+ mock_sh_utils_inst.get_stdout = mock.Mock()
+ mock_sh_inst = mock.Mock()
+ mock_sh.return_value = mock_sh_inst
+ mock_sh_inst.get_rc_stdout_stderr.return_value = (1, '', "error")
+ mock_ctx_inst = mock.Mock(ssh_user='', sudo='')
+ mock_this_node.return_value = "node2"
+ core.start_collector("node1", mock_ctx_inst)
+ mock_warn.assert_called_once_with("error")
+
+ @mock.patch('ast.literal_eval')
+ @mock.patch('crmsh.report.core.sh.LocalShell')
+ @mock.patch('crmsh.report.core.ShellUtils')
+ @mock.patch('crmsh.utils.this_node')
+ def test_start_collector(self, mock_this_node, mock_sh_utils, mock_sh, mock_eval):
+ mock_sh_utils_inst = mock.Mock()
+ mock_sh_utils.return_value = mock_sh_utils_inst
+ mock_sh_utils_inst.get_stdout = mock.Mock()
+ mock_sh_inst = mock.Mock()
+ mock_sh.return_value = mock_sh_inst
+ mock_sh_inst.get_rc_stdout_stderr.return_value = (0, f"line1\n{constants.COMPRESS_DATA_FLAG}data", None)
+ mock_ctx_inst = mock.Mock(ssh_user='', sudo='')
+ mock_this_node.return_value = "node2"
+ mock_eval.return_value = "data"
+ core.start_collector("node1", mock_ctx_inst)
diff --git a/test/unittests/test_report_utils.py b/test/unittests/test_report_utils.py
new file mode 100644
index 0000000..aa28563
--- /dev/null
+++ b/test/unittests/test_report_utils.py
@@ -0,0 +1,862 @@
+import sys
+import datetime
+from crmsh import config
+from crmsh import utils as crmutils
+from crmsh.report import utils, constants
+import crmsh.log
+
+import unittest
+from unittest import mock
+
+
+class TestPackage(unittest.TestCase):
+
+ @mock.patch('crmsh.report.utils.get_pkg_mgr')
+ def setUp(self, mock_get_pkg_mgr):
+ mock_get_pkg_mgr.side_effect = [None, "rpm", "deb"]
+ self.inst_none = utils.Package("xxx1 xxx2")
+ self.inst = utils.Package("rpm1 rpm2")
+ self.inst_deb = utils.Package("deb1 deb2")
+
+ def test_version_return(self):
+ res = self.inst_none.version()
+ self.assertEqual(res, "")
+
+ @mock.patch('crmsh.report.utils.Package.pkg_ver_rpm')
+ def test_version(self, mock_ver_rpm):
+ mock_ver_rpm.return_value = "version1"
+ res = self.inst.version()
+ self.assertEqual(res, "version1")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_version_rpm(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ data = "rpm1-4.5.0\nrpm2 not installed"
+ mock_run_inst.get_stdout_stderr.return_value = (0, data, None)
+ res = self.inst.pkg_ver_rpm()
+ self.assertEqual(res, "rpm1-4.5.0")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_version_deb(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ data = "deb1-4.5.0\nno packages found"
+ mock_run_inst.get_stdout_stderr.return_value = (0, data, None)
+ res = self.inst_deb.pkg_ver_deb()
+ self.assertEqual(res, "deb1-4.5.0")
+
+ def test_verify_return(self):
+ res = self.inst_none.verify()
+ self.assertEqual(res, "")
+
+ @mock.patch('crmsh.report.utils.Package.verify_rpm')
+ def test_verify(self, mock_verify_rpm):
+ mock_verify_rpm.return_value = ""
+ res = self.inst.verify()
+ self.assertEqual(res, "")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_verify_rpm(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "verify data\nThis is not installed", "")
+ res = self.inst.verify_rpm()
+ self.assertEqual(res, "verify data")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_verify_deb(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "verify data\nThis is not installed", "")
+ res = self.inst_deb.verify_deb()
+ self.assertEqual(res, "verify data")
+
+
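+# Sanitizer scrubs sensitive values out of collected CIB/PE/log files. The
+# three fixtures below cover: sanitize disabled, no extra sensitive regex
+# configured, and a full run with a custom pattern.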
+class TestSanitizer(unittest.TestCase):
+
+ def setUp(self):
+ mock_ctx_inst_no_sanitize = mock.Mock(sanitize=False)
+ self.s_inst_no_sanitize = utils.Sanitizer(mock_ctx_inst_no_sanitize)
+
+ mock_ctx_inst_no_sanitize_set = mock.Mock(sensitive_regex_list=[])
+ self.s_inst_no_sanitize_set = utils.Sanitizer(mock_ctx_inst_no_sanitize_set)
+
+ mock_ctx_inst = mock.Mock(sanitize=True, work_dir="/opt", sensitive_regex_list=["test_patt"])
+ self.s_inst = utils.Sanitizer(mock_ctx_inst)
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.report.utils.Sanitizer._include_sensitive_data')
+ @mock.patch('crmsh.report.utils.Sanitizer._extract_sensitive_value_list')
+ @mock.patch('crmsh.report.utils.Sanitizer._parse_sensitive_set')
+ @mock.patch('crmsh.report.utils.Sanitizer._load_cib_from_work_dir')
+ def test_prepare_return(self, mock_load_cib, mock_parse, mock_extract, mock_include, mock_warning):
+ mock_include.return_value = True
+ self.s_inst_no_sanitize.prepare()
+ mock_load_cib.assert_called_once_with()
+ mock_parse.assert_called_once_with()
+ mock_warning.assert_has_calls([
+ mock.call("Some PE/CIB/log files contain possibly sensitive data"),
+ mock.call("Using \"-s\" option can replace sensitive data")
+ ])
+
+ @mock.patch('crmsh.report.utils.Sanitizer._get_file_list_in_work_dir')
+ @mock.patch('crmsh.report.utils.Sanitizer._include_sensitive_data')
+ @mock.patch('crmsh.report.utils.Sanitizer._extract_sensitive_value_list')
+ @mock.patch('crmsh.report.utils.Sanitizer._parse_sensitive_set')
+ @mock.patch('crmsh.report.utils.Sanitizer._load_cib_from_work_dir')
+ def test_prepare(self, mock_load_cib, mock_parse, mock_extract, mock_include, mock_get_file):
+ mock_include.return_value = True
+ self.s_inst.prepare()
+ mock_load_cib.assert_called_once_with()
+ mock_parse.assert_called_once_with()
+ mock_get_file.assert_called_once_with()
+
+ @mock.patch('crmsh.report.utils.Sanitizer._include_sensitive_data')
+ @mock.patch('crmsh.report.utils.Sanitizer._extract_sensitive_value_list')
+ @mock.patch('crmsh.report.utils.Sanitizer._parse_sensitive_set')
+ @mock.patch('crmsh.report.utils.Sanitizer._load_cib_from_work_dir')
+ def test_prepare_no_sensitive_data(self, mock_load_cib, mock_parse, mock_extract, mock_include):
+ mock_include.return_value = False
+ self.s_inst.prepare()
+ mock_load_cib.assert_called_once_with()
+ mock_parse.assert_called_once_with()
+
+ def test_include_sensitive_data(self):
+ res = self.s_inst._include_sensitive_data()
+ self.assertEqual(res, [])
+
+ @mock.patch('os.walk')
+ def test_get_file_list_in_work_dir(self, mock_walk):
+ mock_walk.return_value = [
+ ("/opt", [], ["file1", "file2"]),
+ ("/opt/dir1", [], ["file3"]),
+ ]
+ self.s_inst._get_file_list_in_work_dir()
+ self.assertEqual(self.s_inst.file_list_in_workdir, ['/opt/file1', '/opt/file2', '/opt/dir1/file3'])
+
+ @mock.patch('glob.glob')
+ def test_load_cib_from_work_dir_no_cib(self, mock_glob):
+ mock_glob.return_value = []
+ with self.assertRaises(utils.ReportGenericError) as err:
+ self.s_inst._load_cib_from_work_dir()
+ self.assertEqual(f"CIB file {constants.CIB_F} was not collected", str(err.exception))
+
+ @mock.patch('glob.glob')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_load_cib_from_work_dir_empty(self, mock_read, mock_glob):
+ mock_glob.return_value = [f"/opt/node1/{constants.CIB_F}"]
+ mock_read.return_value = None
+ with self.assertRaises(utils.ReportGenericError) as err:
+ self.s_inst._load_cib_from_work_dir()
+ self.assertEqual(f"File /opt/node1/{constants.CIB_F} is empty", str(err.exception))
+ mock_read.assert_called_once_with(f"/opt/node1/{constants.CIB_F}")
+
+ @mock.patch('glob.glob')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_load_cib_from_work_dir(self, mock_read, mock_glob):
+ mock_glob.return_value = [f"/opt/node1/{constants.CIB_F}"]
+ mock_read.return_value = "data"
+ self.s_inst._load_cib_from_work_dir()
+ self.assertEqual(self.s_inst.cib_data, "data")
+ mock_read.assert_called_once_with(f"/opt/node1/{constants.CIB_F}")
+
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_parse_sensitive_set_no_set(self, mock_logger):
+ config.report.sanitize_rule = ""
+ self.s_inst_no_sanitize_set._parse_sensitive_set()
+ self.assertEqual(self.s_inst_no_sanitize_set.sensitive_regex_set, set(utils.Sanitizer.DEFAULT_RULE_LIST))
+ mock_logger.debug2.assert_called_once_with(f"Regex set to match sensitive data: {set(utils.Sanitizer.DEFAULT_RULE_LIST)}")
+
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_parse_sensitive_set(self, mock_logger):
+ config.report.sanitize_rule = "passw.*"
+ self.s_inst._parse_sensitive_set()
+ self.assertEqual(self.s_inst.sensitive_regex_set, set(['test_patt', 'passw.*']))
+ mock_logger.debug2.assert_called_once_with(f"Regex set to match sensitive data: {set(['test_patt', 'passw.*'])}")
+
+ def test_sanitize_return(self):
+ self.s_inst_no_sanitize.sanitize()
+
+ @mock.patch('crmsh.report.utils.write_to_file')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.report.utils.Sanitizer._sub_sensitive_string')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_sanitize(self, mock_read, mock_sub, mock_debug, mock_write):
+ self.s_inst.file_list_in_workdir = ["file1", "file2"]
+ mock_read.side_effect = [None, "data"]
+ mock_sub.return_value = "replace_data"
+ self.s_inst.sanitize()
+ mock_debug.assert_called_once_with("Replace sensitive info for %s", "file2")
+
+ def test_extract_from_cib(self):
+ self.s_inst.cib_data = """
+ <utilization id="nodes-1-utilization">
+ <nvpair id="nodes-1-utilization-password" name="password" value="qwertyui"/>
+ </utilization>
+ """
+ res = self.s_inst._extract_from_cib("passw.*")
+ self.assertEqual(res, ["qwertyui"])
+
+ def test_sub_sensitive_string(self):
+ data = """
+ <utilization id="nodes-1-utilization">
+ <nvpair id="nodes-1-utilization-TEL" name="TEL" value="13356789876"/>
+ <nvpair id="nodes-1-utilization-password" name="password" value="qwertyui"/>
+ </utilization>
+ This my tel 13356789876
+ """
+ self.s_inst.sensitive_value_list_with_raw_option = ["13356789876"]
+ self.s_inst.sensitive_key_list = ["passw.*"]
+ self.s_inst.sensitive_value_list = ["qwertyui"]
+ res = self.s_inst._sub_sensitive_string(data)
+ expected_data = """
+ <utilization id="nodes-1-utilization">
+ <nvpair id="nodes-1-utilization-TEL" name="TEL" value="******"/>
+ <nvpair id="nodes-1-utilization-password" name="password" value="******"/>
+ </utilization>
+ This my tel ******
+ """
+ self.assertEqual(res, expected_data)
+
+ @mock.patch('logging.Logger.warning')
+ def test_extract_sensitive_value_list_warn(self, mock_warn):
+ self.s_inst.sensitive_regex_set = set(["TEL:test"])
+ self.s_inst._extract_sensitive_value_list()
+ mock_warn.assert_called_once_with("For sanitize pattern TEL:test, option should be \"raw\"")
+
+ @mock.patch('crmsh.report.utils.Sanitizer._extract_from_cib')
+ def test_extract_sensitive_value_list(self, mock_extract):
+ mock_extract.side_effect = [["123456"], ["qwertyui"]]
+ self.s_inst.sensitive_regex_set = set(["TEL:raw", "passw.*"])
+ self.s_inst._extract_sensitive_value_list()
+
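+
+# Tests for the module-level helpers in crmsh.report.utils. Several of them
+# pin module state (constants.STAMP_TYPE) or patch builtins.sorted so that
+# log ordering is deterministic.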
+class TestUtils(unittest.TestCase):
+
+ @mock.patch('builtins.sorted', side_effect=lambda x, *args, **kwargs: x[::-1])
+ @mock.patch('crmsh.report.utils.get_timespan_str')
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('glob.glob')
+ @mock.patch('crmsh.report.utils.is_our_log')
+ def test_arch_logs(self, mock_is_our_log, mock_glob, mock_logger, mock_timespan, mock_sorted):
+ mock_is_our_log.return_value = utils.LogType.GOOD
+ mock_glob.return_value = []
+ mock_ctx_inst = mock.Mock()
+ mock_timespan.return_value = "0101-0202"
+
+ return_list, log_type = utils.arch_logs(mock_ctx_inst, "file1")
+
+ self.assertEqual(return_list, ["file1"])
+ self.assertEqual(log_type, utils.LogType.GOOD)
+ mock_logger.debug2.assert_called_once_with("Found logs ['file1'] in 0101-0202")
+
+ @mock.patch('sys.stdout.flush')
+ @mock.patch('traceback.print_exc')
+ def test_print_traceback(self, mock_trace, mock_flush):
+ utils.print_traceback()
+ mock_trace.assert_called_once_with()
+
+ @mock.patch('crmsh.report.utils.ts_to_str')
+ def test_get_timespan_str(self, mock_ts_to_str):
+ mock_ctx_inst = mock.Mock(from_time=1691938980.0, to_time=1691982180.0)
+ mock_ts_to_str.side_effect = ["2023-08-13 23:03", "2023-08-14 11:03"]
+ res = utils.get_timespan_str(mock_ctx_inst)
+ self.assertEqual(res, "2023-08-13 23:03 - 2023-08-14 11:03")
+ mock_ts_to_str.assert_has_calls([
+ mock.call(mock_ctx_inst.from_time),
+ mock.call(mock_ctx_inst.to_time)
+ ])
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_get_cmd_output(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "stdout_data", "stderr_data")
+ res = utils.get_cmd_output("cmd")
+ self.assertEqual(res, "stdout_data\nstderr_data\n")
+ mock_run_inst.get_stdout_stderr.assert_called_once_with("cmd", timeout=None)
+
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_is_our_log_empty(self, mock_read):
+ mock_read.return_value = None
+ mock_ctx_inst = mock.Mock()
+ res = utils.is_our_log(mock_ctx_inst, "/opt/logfile")
+ self.assertEqual(res, utils.LogType.EMPTY)
+ mock_read.assert_called_once_with("/opt/logfile")
+
+ @mock.patch('crmsh.report.utils.determin_log_format')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_is_our_log_irregular(self, mock_read, mock_log_format):
+ mock_read.return_value = "This is the log"
+ mock_ctx_inst = mock.Mock()
+ mock_log_format.return_value = None
+ res = utils.is_our_log(mock_ctx_inst, "/opt/logfile")
+ self.assertEqual(res, utils.LogType.IRREGULAR)
+ mock_read.assert_called_once_with("/opt/logfile")
+ mock_log_format.assert_called_once_with(mock_read.return_value)
+
+ @mock.patch('crmsh.report.utils.find_first_timestamp')
+ @mock.patch('crmsh.report.utils.head')
+ @mock.patch('crmsh.report.utils.determin_log_format')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_is_our_log_before(self, mock_read, mock_determine, mock_head, mock_find_first):
+ mock_read.return_value = "data"
+ mock_determine.return_value = "rfc5424"
+ mock_find_first.side_effect = [1000, 1500]
+ mock_ctx_inst = mock.Mock(from_time=1600, to_time=1800)
+ res = utils.is_our_log(mock_ctx_inst, "/var/log/pacemaker.log")
+ self.assertEqual(res, utils.LogType.BEFORE_TIMESPAN)
+
+ @mock.patch('crmsh.report.utils.find_first_timestamp')
+ @mock.patch('crmsh.report.utils.head')
+ @mock.patch('crmsh.report.utils.determin_log_format')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_is_our_log_good(self, mock_read, mock_determine, mock_head, mock_find_first):
+ mock_read.return_value = "data"
+ mock_determine.return_value = "rfc5424"
+ mock_find_first.side_effect = [1000, 1500]
+ mock_ctx_inst = mock.Mock(from_time=1200, to_time=1800)
+ res = utils.is_our_log(mock_ctx_inst, "/var/log/pacemaker.log")
+ self.assertEqual(res, utils.LogType.GOOD)
+
+ @mock.patch('crmsh.report.utils.find_first_timestamp')
+ @mock.patch('crmsh.report.utils.head')
+ @mock.patch('crmsh.report.utils.determin_log_format')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_is_our_log_after(self, mock_read, mock_determine, mock_head, mock_find_first):
+ mock_read.return_value = "data"
+ mock_determine.return_value = "rfc5424"
+ mock_find_first.side_effect = [1000, 1500]
+ mock_ctx_inst = mock.Mock(from_time=200, to_time=800)
+ res = utils.is_our_log(mock_ctx_inst, "/var/log/pacemaker.log")
+ self.assertEqual(res, utils.LogType.AFTER_TIMESPAN)
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('shutil.which')
+ def test_get_pkg_mgr_unknown(self, mock_which, mock_warning):
+ mock_which.side_effect = [False, False]
+ self.assertEqual(utils.get_pkg_mgr(), "")
+ mock_warning.assert_called_once_with("Unknown package manager!")
+
+ @mock.patch('shutil.which')
+ def test_get_pkg_mgr(self, mock_which):
+ mock_which.return_value = True
+ self.assertEqual(utils.get_pkg_mgr(), "rpm")
+
+ @mock.patch('os.walk')
+ @mock.patch('os.stat')
+ @mock.patch('os.path.isdir')
+ def test_find_files_in_timespan(self, mock_isdir, mock_stat, mock_walk):
+ mock_isdir.side_effect = [True, False]
+ mock_stat.return_value = mock.Mock(st_ctime=1615)
+ mock_walk.return_value = [
+ ('/mock_dir', [], ['file1.txt', 'file2.txt'])
+ ]
+ mock_ctx_inst = mock.Mock(from_time=1611, to_time=1620)
+
+ res = utils.find_files_in_timespan(mock_ctx_inst, ['/mock_dir', '/not_exist'])
+
+ expected_result = ['/mock_dir/file1.txt', '/mock_dir/file2.txt']
+ self.assertEqual(res, expected_result)
+
+ @mock.patch('crmsh.report.utils.get_timespan_str')
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.report.utils.arch_logs')
+ def test_dump_logset_return(self, mock_arch, mock_debug, mock_timespan):
+ mock_arch.return_value = [[], ""]
+ mock_ctx_inst = mock.Mock()
+ utils.dump_logset(mock_ctx_inst, "file")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.utils.print_logseg')
+ @mock.patch('crmsh.report.utils.arch_logs')
+ def test_dump_logset_irregular(self, mock_arch, mock_print, mock_basename, mock_str2file, mock_debug, mock_real_path):
+ mock_real_path.return_value = "file1"
+ mock_arch.return_value = [["file1"], utils.LogType.IRREGULAR]
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir")
+ mock_basename.return_value = "file1"
+ mock_print.return_value = "data"
+ utils.dump_logset(mock_ctx_inst, "file1")
+ mock_print.assert_called_once_with("file1", 0, 0)
+ mock_str2file.assert_called_once_with("data", "/opt/work_dir/file1")
+ mock_debug.assert_called_once_with("Dump file1 into file1")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.utils.print_logseg')
+ @mock.patch('crmsh.report.utils.arch_logs')
+ def test_dump_logset_one(self, mock_arch, mock_print, mock_basename, mock_str2file, mock_debug, mock_real_path):
+ mock_real_path.return_value = "file1"
+ mock_arch.return_value = [["file1"], utils.LogType.GOOD]
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir", from_time=10, to_time=20)
+ mock_basename.return_value = "file1"
+ mock_print.return_value = "data"
+
+ utils.dump_logset(mock_ctx_inst, "file1")
+
+ mock_print.assert_called_once_with("file1", 10, 20)
+ mock_str2file.assert_called_once_with("data", "/opt/work_dir/file1")
+ mock_debug.assert_called_once_with("Dump file1 into file1")
+
+ @mock.patch('crmsh.report.utils.real_path')
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('os.path.basename')
+ @mock.patch('crmsh.report.utils.print_logseg')
+ @mock.patch('crmsh.report.utils.arch_logs')
+ def test_dump_logset(self, mock_arch, mock_print, mock_basename, mock_str2file, mock_debug, mock_real_path):
+ mock_real_path.return_value = "file1"
+ mock_arch.return_value = [["file1", "file2", "file3"], utils.LogType.GOOD]
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir", from_time=10, to_time=20)
+ mock_basename.return_value = "file1"
+ mock_print.side_effect = ["data1\n", "data2\n", "data3\n"]
+
+ utils.dump_logset(mock_ctx_inst, "file1")
+
+ mock_print.assert_has_calls([
+ mock.call("file3", 10, 0),
+ mock.call("file2", 0, 0),
+ mock.call("file1", 0, 20),
+ ])
+ mock_str2file.assert_called_once_with("data1\ndata2\ndata3", "/opt/work_dir/file1")
+ mock_debug.assert_called_once_with("Dump file1 into file1")
+
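+ # get_distro_info() prefers PRETTY_NAME from /etc/os-release and falls
+ # back to lsb_release when the file is absent; "Unknown" is returned when
+ # the fallback output cannot be parsed.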
+ @mock.patch('crmsh.utils.read_from_file')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_get_distro_info(self, mock_debug2, mock_exists, mock_read):
+ mock_exists.return_value = True
+ mock_read.return_value = """
+VERSION_ID="20230629"
+PRETTY_NAME="openSUSE Tumbleweed"
+ANSI_COLOR="0;32"
+ """
+ res = utils.get_distro_info()
+ self.assertEqual(res, "openSUSE Tumbleweed")
+
+ @mock.patch('shutil.which')
+ @mock.patch('crmsh.report.utils.sh.LocalShell')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ def test_get_distro_info_lsb(self, mock_debug2, mock_exists, mock_sh, mock_which):
+ mock_which.return_value = True
+ mock_exists.return_value = False
+ mock_sh_inst = mock.Mock()
+ mock_sh.return_value = mock_sh_inst
+ mock_sh_inst.get_stdout_or_raise_error.return_value = "data"
+ res = utils.get_distro_info()
+ self.assertEqual(res, "Unknown")
+
+ @mock.patch('crmsh.report.utils.get_timestamp')
+ def test_find_first_timestamp_none(self, mock_get_timestamp):
+ mock_get_timestamp.side_effect = [None, None]
+ data = ["line1", "line2"]
+ self.assertIsNone(utils.find_first_timestamp(data, "file1"))
+ mock_get_timestamp.assert_has_calls([
+ mock.call("line1", "file1"),
+ mock.call("line2", "file1")
+ ])
+
+ @mock.patch('crmsh.report.utils.get_timestamp')
+ def test_find_first_timestamp(self, mock_get_timestamp):
+ mock_get_timestamp.return_value = 123456
+ data = ["line1", "line2"]
+ res = utils.find_first_timestamp(data, "file1")
+ self.assertEqual(res, 123456)
+ mock_get_timestamp.assert_called_once_with("line1", "file1")
+
+ def test_filter_lines(self):
+ data = """line1
+line2
+line3
+line4
+line5
+ """
+ res = utils.filter_lines(data, 2, 4)
+ self.assertEqual(res, 'line2\nline3\nline4\n')
+
+ @mock.patch('crmsh.utils.parse_time')
+ @mock.patch('crmsh.report.utils.head')
+ def test_determin_log_format_none(self, mock_head, mock_parse):
+ mock_head.return_value = ["line1", "line2"]
+ mock_parse.side_effect = [None, None]
+ data = """line1
+line2
+ """
+ self.assertIsNone(utils.determin_log_format(data))
+
+ def test_determin_log_format_rfc5424(self):
+ data = """
+2003-10-11T22:14:15.003Z mymachine.example.com su
+ """
+ res = utils.determin_log_format(data)
+ self.assertEqual(res, "rfc5424")
+
+ def test_determin_log_format_syslog(self):
+ data = """
+Feb 12 18:30:08 15sp1-1 kernel:
+ """
+ res = utils.determin_log_format(data)
+ self.assertEqual(res, "syslog")
+
+ @mock.patch('crmsh.utils.parse_time')
+ @mock.patch('crmsh.report.utils.head')
+ def test_determin_log_format_legacy(self, mock_head, mock_parse):
+ mock_head.return_value = ["Legacy 2003-10-11T22:14:15.003Z log"]
+ mock_parse.side_effect = [None, None, 123456]
+ data = """
+Legacy 2003-10-11T22:14:15.003Z log data log
+ """
+ res = utils.determin_log_format(data)
+ self.assertEqual(res, "legacy")
+ mock_parse.assert_has_calls([
+ mock.call("Legacy 2003-10-11T22:14:15.003Z log", quiet=True),
+ mock.call("Legacy", quiet=True),
+ mock.call("2003-10-11T22:14:15.003Z", quiet=True)
+ ])
+
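+ # get_timestamp() dispatches on the module-level constants.STAMP_TYPE,
+ # which is why each test below sets it explicitly before calling.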
+ def test_get_timestamp_none(self):
+ self.assertIsNone(utils.get_timestamp("", "file1"))
+
+ @mock.patch('crmsh.report.utils.get_timestamp_from_time_line')
+ def test_get_timestamp_rfc5424(self, mock_get_timestamp):
+ constants.STAMP_TYPE = "rfc5424"
+ mock_get_timestamp.return_value = 12345
+ res = utils.get_timestamp("2003-10-11T22:14:15.003Z mymachine.example.com su", "file1")
+ self.assertEqual(res, mock_get_timestamp.return_value)
+ mock_get_timestamp.assert_called_once_with("2003-10-11T22:14:15.003Z", "rfc5424", "file1")
+
+ @mock.patch('crmsh.report.utils.get_timestamp_from_time_line')
+ def test_get_timestamp_syslog(self, mock_get_timestamp):
+ constants.STAMP_TYPE = "syslog"
+ mock_get_timestamp.return_value = 12345
+ res = utils.get_timestamp("Feb 12 18:30:08 15sp1-1 kernel:", "file1")
+ self.assertEqual(res, mock_get_timestamp.return_value)
+ mock_get_timestamp.assert_called_once_with("Feb 12 18:30:08", "syslog", "file1")
+
+ @mock.patch('crmsh.report.utils.get_timestamp_from_time_line')
+ def test_get_timestamp_legacy(self, mock_get_timestamp):
+ constants.STAMP_TYPE = "legacy"
+ mock_get_timestamp.return_value = 12345
+ res = utils.get_timestamp("legacy 2003-10-11T22:14:15.003Z log data", "file1")
+ self.assertEqual(res, mock_get_timestamp.return_value)
+ mock_get_timestamp.assert_called_once_with("2003-10-11T22:14:15.003Z", "legacy", "file1")
+
+ @mock.patch('crmsh.report.utils.diff_check')
+ def test_do_compare(self, mock_diff):
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir", node_list=["node1", "node2"])
+ mock_diff.side_effect = [[0, ""], [0, ""]]
+ rc, out = utils.do_compare(mock_ctx_inst, "file1")
+ self.assertEqual(rc, 0)
+ self.assertEqual(out, "")
+ mock_diff.assert_called_once_with("/opt/workdir/node1/file1", "/opt/workdir/node2/file1")
+
+ @mock.patch('os.path.isfile')
+ def test_diff_check_return(self, mock_isfile):
+ mock_isfile.return_value = False
+ rc, out = utils.diff_check("/opt/file1", "/opt/fil2")
+ self.assertEqual(rc, 1)
+ self.assertEqual(out, "/opt/file1 does not exist\n")
+
+ @mock.patch('crmsh.report.utils.cib_diff')
+ @mock.patch('os.path.basename')
+ @mock.patch('os.path.isfile')
+ def test_diff_check(self, mock_isfile, mock_basename, mock_cib_diff):
+ mock_isfile.side_effect = [True, True]
+ mock_basename.return_value = "cib.xml"
+ mock_cib_diff.return_value = (0, "")
+ rc, out = utils.diff_check("/opt/node1/cib.xml", "/opt/node2/cib.xml")
+ self.assertEqual(rc, 0)
+ self.assertEqual(out, "")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ def test_txt_diff(self, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_stderr.return_value = (0, "", None)
+ rc, out = utils.txt_diff("txt1", "txt2")
+ self.assertEqual(rc, 0)
+ self.assertEqual(out, "")
+
+ @mock.patch('os.path.isfile')
+ def test_cib_diff_not_running(self, mock_isfile):
+ mock_isfile.side_effect = [True, False, False, True]
+ rc, out = utils.cib_diff("/opt/node1/cib.xml", "/opt/node2/cib.xml")
+ self.assertEqual(rc, 1)
+ self.assertEqual(out, "Can't compare cibs from running and stopped systems\n")
+
+ @mock.patch('crmsh.report.utils.ShellUtils')
+ @mock.patch('os.path.isfile')
+ def test_cib_diff(self, mock_isfile, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_isfile.side_effect = [True, True]
+ mock_run_inst.get_stdout_stderr.return_value = (0, "", None)
+ rc, out = utils.cib_diff("/opt/node1/cib.xml", "/opt/node2/cib.xml")
+ self.assertEqual(rc, 0)
+ self.assertEqual(out, "")
+ mock_run_inst.get_stdout_stderr.assert_called_once_with("crm_diff -c -n /opt/node1/cib.xml -o /opt/node2/cib.xml")
+
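+ # consolidate() deduplicates identical per-node files: the shared copy
+ # lives in the work dir and each node directory gets a relative symlink
+ # back to it, as the symlink call pattern below shows.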
+ @mock.patch('os.symlink')
+ @mock.patch('shutil.move')
+ @mock.patch('os.remove')
+ @mock.patch('os.path.isfile')
+ def test_consolidate(self, mock_isfile, mock_remove, mock_move, mock_symlink):
+ mock_isfile.side_effect = [True, False]
+ mock_ctx_inst = mock.Mock(node_list=["node1", "node2"], work_dir="/opt/workdir")
+ utils.consolidate(mock_ctx_inst, "target_file")
+ mock_isfile.assert_has_calls([
+ mock.call("/opt/workdir/target_file"),
+ mock.call("/opt/workdir/target_file")
+ ])
+ mock_symlink.assert_has_calls([
+ mock.call('../target_file', '/opt/workdir/node1/target_file'),
+ mock.call('../target_file', '/opt/workdir/node2/target_file')
+ ])
+
+ @mock.patch('crmsh.report.utils.Sanitizer')
+ def test_do_sanitize(self, mock_sanitizer):
+ mock_inst = mock.Mock()
+ mock_sanitizer.return_value = mock_inst
+ mock_ctx_inst = mock.Mock()
+ utils.do_sanitize(mock_ctx_inst)
+ mock_inst.prepare.assert_called_once_with()
+ mock_inst.sanitize.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_print_logseg_empty(self, mock_read):
+ mock_read.return_value = ""
+ res = utils.print_logseg("log1", 1234, 0)
+ self.assertEqual(res, "")
+
+ @mock.patch('crmsh.report.utils.findln_by_timestamp')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_print_logseg_none(self, mock_read, mock_findln):
+ mock_read.return_value = "data"
+ mock_findln.return_value = None
+ res = utils.print_logseg("log1", 1234, 0)
+ self.assertEqual(res, "")
+
+ @mock.patch('crmsh.report.utils.filter_lines')
+ @mock.patch('crmsh.report.utils.logger', spec=crmsh.log.DEBUG2Logger)
+ @mock.patch('crmsh.report.utils.findln_by_timestamp')
+ @mock.patch('crmsh.utils.read_from_file')
+ def test_print_logseg(self, mock_read, mock_findln, mock_logger, mock_filter):
+ mock_read.return_value = "line1\nline2\nline3"
+ mock_filter.return_value = "line1\nline2\nline3"
+ res = utils.print_logseg("log1", 0, 0)
+ self.assertEqual(res, mock_filter.return_value)
+ mock_logger.debug2.assert_called_once_with("Including segment [%d-%d] from %s", 1, 3, "log1")
+
+ def test_head(self):
+ data = "line1\nline2\nline3"
+ res = utils.head(2, data)
+ self.assertEqual(res, ["line1", "line2"])
+
+ def test_tail(self):
+ data = "line1\nline2\nline3"
+ res = utils.tail(2, data)
+ self.assertEqual(res, ["line2", "line3"])
+
+ @mock.patch('crmsh.utils.get_open_method')
+ @mock.patch('builtins.open', create=True)
+ def test_write_to_file(self, mock_open, mock_method):
+ mock_method.return_value = mock_open
+ file_handle = mock_open.return_value.__enter__.return_value
+ utils.write_to_file('Hello', 'file.txt')
+ mock_open.assert_called_once_with('file.txt', 'w')
+ file_handle.write.assert_called_once_with('Hello')
+
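+ # When the opener resolved by get_open_method() is gzip.open,
+ # write_to_file() is expected to encode the payload to bytes first, hence
+ # the b'Hello' assertion below.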
+ @mock.patch('gzip.open')
+ @mock.patch('crmsh.utils.get_open_method')
+ def test_write_to_file_encode(self, mock_method, mock_open):
+ mock_method.return_value = mock_open
+ file_handle = mock_open.return_value.__enter__.return_value
+ utils.write_to_file('Hello', 'file.txt')
+ mock_open.assert_called_once_with('file.txt', 'w')
+ file_handle.write.assert_called_once_with(b'Hello')
+
+ @mock.patch('crmsh.report.utils.dt_to_str')
+ @mock.patch('crmsh.report.utils.ts_to_dt')
+ def test_ts_to_str(self, mock_ts_to_dt, mock_dt_to_str):
+ mock_ts_to_dt.return_value = datetime.datetime(2020, 2, 19, 21, 44, 7, 977355)
+ mock_dt_to_str.return_value = "2020-02-19 21:44"
+ res = utils.ts_to_str(1693519260.0)
+ self.assertEqual(res, mock_dt_to_str.return_value)
+
+ def test_ts_to_dt(self):
+ res = utils.ts_to_dt(crmutils.parse_to_timestamp("2023-09-01 06:01"))
+ self.assertEqual(utils.dt_to_str(res), "2023-09-01 06:01:00")
+
+ def test_now(self):
+ expected_res = datetime.datetime.now().strftime(constants.TIME_FORMAT)
+ res = utils.now()
+ self.assertEqual(res, expected_res)
+
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.utils.read_from_file')
+ @mock.patch('os.path.isfile')
+ @mock.patch('crmsh.report.utils.now')
+ def test_create_description_template(self, mock_now, mock_isfile, mock_read, mock_str2file):
+ mock_now.return_value = "2023-09-01 06:01"
+ sys.argv = ["crm", "report", "option1"]
+ mock_ctx_inst = mock.Mock(node_list=["node1"], work_dir="/opt/workdir")
+ mock_isfile.return_value = True
+ mock_read.return_value = "data"
+ utils.create_description_template(mock_ctx_inst)
+
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.report.utils.extract_critical_log')
+ @mock.patch('crmsh.report.utils.check_collected_files')
+ @mock.patch('crmsh.report.utils.compare_and_consolidate_files')
+ @mock.patch('glob.glob')
+ def test_analyze(self, mock_glob, mock_compare, mock_check_collected, mock_extract, mock_str2file):
+ mock_compare.return_value = "data"
+ mock_check_collected.return_value = ""
+ mock_extract.return_value = ""
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir")
+ utils.analyze(mock_ctx_inst)
+ mock_str2file.assert_called_once_with("data", f"/opt/work_dir/{constants.ANALYSIS_F}")
+
+ @mock.patch('crmsh.report.utils.consolidate')
+ @mock.patch('crmsh.report.utils.do_compare')
+ @mock.patch('glob.glob')
+ def test_compare_and_consolidate_files(self, mock_glob, mock_compare, mock_consolidate):
+ mock_ctx_inst = mock.Mock(work_dir="/opt/work_dir")
+ mock_glob.side_effect = [False, True, True, True, True]
+ mock_compare.side_effect = [(0, ""), (0, ""), (0, ""), (0, "")]
+ res = utils.compare_and_consolidate_files(mock_ctx_inst)
+ self.assertEqual(f"Diff {constants.MEMBERSHIP_F}... no {constants.MEMBERSHIP_F} found in /opt/work_dir\nDiff {constants.CRM_MON_F}... OK\nDiff {constants.COROSYNC_F}... OK\nDiff {constants.SYSINFO_F}... OK\nDiff {constants.CIB_F}... OK\n\n", res)
+
+ @mock.patch('crmsh.utils.read_from_file')
+ @mock.patch('crmsh.utils.file_is_empty')
+ @mock.patch('os.path.isfile')
+ def test_check_collected_files(self, mock_isfile, mock_is_empty, mock_read):
+ mock_ctx_inst = mock.Mock(node_list=["node1"], work_dir="/opt/work_dir")
+ mock_isfile.side_effect = [False, False, True]
+ mock_is_empty.return_value = False
+ mock_read.return_value = "data"
+ res = utils.check_collected_files(mock_ctx_inst)
+ self.assertEqual(res, ["Checking problems with permissions/ownership at node1:", "data"])
+
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.utils.parse_to_timestamp')
+ def test_parse_to_timestamp_none(self, mock_parse, mock_error):
+ mock_parse.return_value = None
+ with self.assertRaises(utils.ReportGenericError) as err:
+ utils.parse_to_timestamp("xxxxx")
+ mock_error.assert_has_calls([
+ mock.call("Invalid time string 'xxxxx'"),
+ mock.call('Try these formats like: 2pm; "2019/9/5 12:30"; "09-Sep-07 2:00"; "[1-9][0-9]*[YmdHM]"')
+ ])
+
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.utils.parse_to_timestamp')
+ def test_parse_to_timestamp(self, mock_parse, mock_error):
+ mock_parse.return_value = 1234567
+ res = utils.parse_to_timestamp("2023")
+ self.assertEqual(res, mock_parse.return_value)
+
+ def test_parse_to_timestamp_delta(self):
+ timedelta = datetime.timedelta(days=10)
+ expected_timestamp = (datetime.datetime.now() - timedelta).timestamp()
+ res = utils.parse_to_timestamp("10d")
+ self.assertEqual(int(res), int(expected_timestamp))
+
+ @mock.patch('crmsh.utils.read_from_file')
+ @mock.patch('glob.glob')
+ def test_extract_critical_log(self, mock_glob, mock_read):
+ mock_glob.return_value = ["/opt/workdir/pacemaker.log"]
+ mock_read.return_value = """
+line1
+pacemaker-controld[5678]: warning: data
+pacemaker-schedulerd[5677]: error: Resource
+line4
+ """
+ mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
+ res = utils.extract_critical_log(mock_ctx_inst)
+ expected_data = """
+WARNINGS or ERRORS in pacemaker.log:
+pacemaker-controld[5678]: warning: data
+pacemaker-schedulerd[5677]: error: Resource"""
+ self.assertEqual('\n'.join(res), expected_data)
+
+ def test_findln_by_timestamp_1(self):
+ pacemaker_file_path = "pacemaker.log.2"
+ with open(pacemaker_file_path) as f:
+ data = f.read()
+ data_list = data.split('\n')
+ constants.STAMP_TYPE = utils.determin_log_format(data)
+ first_timestamp = utils.get_timestamp(data_list[0], pacemaker_file_path)
+ middle_timestamp = utils.get_timestamp(data_list[1], pacemaker_file_path)
+ last_timestamp = utils.get_timestamp(data_list[2], pacemaker_file_path)
+ assert first_timestamp < middle_timestamp < last_timestamp
+ line_stamp = crmutils.parse_to_timestamp("Jan 03 11:03:41 2024")
+ result_line = utils.findln_by_timestamp(data, line_stamp, pacemaker_file_path)
+ assert result_line == 2
+ line_stamp = crmutils.parse_to_timestamp("Jan 03 12:03:41 2024")
+ result_line = utils.findln_by_timestamp(data, line_stamp, pacemaker_file_path)
+ assert result_line == 3
+
+ def test_findln_by_timestamp_irregular(self):
+ data = """line1
+ line2
+ line3
+ line4"""
+ target_time = "Apr 03 13:10"
+ target_time_stamp = crmutils.parse_to_timestamp(target_time)
+ result_line = utils.findln_by_timestamp(data, target_time_stamp, "file1")
+ self.assertIsNone(result_line)
+
+ def test_findln_by_timestamp(self):
+ target_time = "Apr 03 13:10"
+ target_time_stamp = crmutils.parse_to_timestamp(target_time+' 2023')
+ with open('pacemaker.log') as f:
+ data = f.read()
+ constants.STAMP_TYPE = utils.determin_log_format(data)
+ pacemaker_file_path = "pacemaker.log"
+ result_line = utils.findln_by_timestamp(data, target_time_stamp, pacemaker_file_path)
+ result_line_stamp = utils.get_timestamp(data.split('\n')[result_line-1], pacemaker_file_path)
+ assert result_line_stamp > target_time_stamp
+ result_pre_line_stamp = utils.get_timestamp(data.split('\n')[result_line-2], pacemaker_file_path)
+ assert result_pre_line_stamp < target_time_stamp
+
+ target_time = "Apr 03 11:01:19"
+ target_time_stamp = crmutils.parse_to_timestamp(target_time+' 2023')
+ result_line = utils.findln_by_timestamp(data, target_time_stamp, pacemaker_file_path)
+ result_time = ' '.join(data.split('\n')[result_line-1].split()[:3])
+ self.assertEqual(result_time, target_time)
+
+ @mock.patch('crmsh.utils.parse_to_timestamp')
+ def test_get_timestamp_from_time_line_not_syslog(self, mock_parse):
+ mock_parse.return_value = 123456
+ res = utils.get_timestamp_from_time_line("line1", "rfc5424", "file1")
+ self.assertEqual(res, mock_parse.return_value)
+
+ @mock.patch('os.path.getmtime')
+ @mock.patch('crmsh.report.utils.datetime')
+ @mock.patch('crmsh.utils.parse_to_timestamp')
+ def test_get_timestamp_from_time_line_next_year(self, mock_parse, mock_datetime, mock_getmtime):
+ mock_parse.return_value = 8888888888888
+ mock_getmtime.return_value = 1691938980.0
+ mock_datetime.datetime.now.return_value = datetime.datetime(2023, 9, 1, 6, 1)
+ mock_datetime.datetime.fromtimestamp.return_value = datetime.datetime(2024, 9, 1, 6, 1)
+ res = utils.get_timestamp_from_time_line("line1", "syslog", "file1")
+ self.assertIsNone(res)
+
+ @mock.patch('os.path.getmtime')
+ @mock.patch('crmsh.report.utils.datetime')
+ @mock.patch('crmsh.utils.parse_to_timestamp')
+ def test_get_timestamp_from_time_line_that_year(self, mock_parse, mock_datetime, mock_getmtime):
+ mock_parse.return_value = 8888888888888
+ mock_getmtime.return_value = 1691938980.0
+ mock_datetime.datetime.now.return_value = datetime.datetime(2023, 9, 1, 6, 1)
+ mock_datetime.datetime.fromtimestamp.return_value = datetime.datetime(2022, 9, 1, 6, 1)
+ res = utils.get_timestamp_from_time_line("line1", "syslog", "file1")
+ self.assertEqual(res, mock_parse.return_value)
diff --git a/test/unittests/test_sbd.py b/test/unittests/test_sbd.py
new file mode 100644
index 0000000..bc2b50a
--- /dev/null
+++ b/test/unittests/test_sbd.py
@@ -0,0 +1,894 @@
+import os
+import unittest
+import logging
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import bootstrap
+from crmsh import sbd
+
+
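+# SBDTimeout derives watchdog, msgwait and stonith timeouts from the
+# detected profile and corosync settings; the fixture below simulates an
+# s390 host (larger watchdog default) with a qdevice instance attached.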
+class TestSBDTimeout(unittest.TestCase):
+ """
+ Unitary tests for crmsh.sbd.SBDTimeout
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ _dict = {"sbd.watchdog_timeout": 5, "sbd.msgwait": 10}
+ _inst_q = mock.Mock()
+ self.sbd_timeout_inst = sbd.SBDTimeout(mock.Mock(profiles_dict=_dict, is_s390=True, qdevice_inst=_inst_q))
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_initialize_timeout(self):
+ self.sbd_timeout_inst._set_sbd_watchdog_timeout = mock.Mock()
+ self.sbd_timeout_inst._set_sbd_msgwait = mock.Mock()
+ self.sbd_timeout_inst._adjust_sbd_watchdog_timeout_with_diskless_and_qdevice = mock.Mock()
+ self.sbd_timeout_inst.initialize_timeout()
+ self.sbd_timeout_inst._set_sbd_watchdog_timeout.assert_called_once()
+ self.sbd_timeout_inst._set_sbd_msgwait.assert_not_called()
+ self.sbd_timeout_inst._adjust_sbd_watchdog_timeout_with_diskless_and_qdevice.assert_called_once()
+
+ @mock.patch('logging.Logger.warning')
+ def test_set_sbd_watchdog_timeout(self, mock_warn):
+ self.sbd_timeout_inst._set_sbd_watchdog_timeout()
+ mock_warn.assert_called_once_with("sbd_watchdog_timeout is set to %d for s390, it was %d", sbd.SBDTimeout.SBD_WATCHDOG_TIMEOUT_DEFAULT_S390, 5)
+
+ @mock.patch('logging.Logger.warning')
+ def test_set_sbd_msgwait(self, mock_warn):
+ self.sbd_timeout_inst.sbd_watchdog_timeout = 15
+ self.sbd_timeout_inst._set_sbd_msgwait()
+ mock_warn.assert_called_once_with("sbd msgwait is set to %d, it was %d", 30, 10)
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.get_qdevice_sync_timeout')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_adjust_sbd_watchdog_timeout_with_diskless_and_qdevice_sbd_stage(self, mock_is_configured, mock_is_active, mock_get_sync, mock_warn):
+ mock_is_configured.return_value = True
+ mock_is_active.return_value = True
+ mock_get_sync.return_value = 15
+ self.sbd_timeout_inst.sbd_watchdog_timeout = 5
+ self.sbd_timeout_inst._adjust_sbd_watchdog_timeout_with_diskless_and_qdevice()
+ mock_warn.assert_called_once_with("sbd_watchdog_timeout is set to 20 for qdevice, it was 5")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_adjust_sbd_watchdog_timeout_with_diskless_and_qdevice_all(self, mock_is_configured, mock_warn):
+ mock_is_configured.return_value = False
+ self.sbd_timeout_inst.sbd_watchdog_timeout = 5
+ self.sbd_timeout_inst._adjust_sbd_watchdog_timeout_with_diskless_and_qdevice()
+ mock_warn.assert_called_once_with("sbd_watchdog_timeout is set to 35 for qdevice, it was 5")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_get_sbd_msgwait_exception(self, mock_run):
+ mock_run.return_value = "data"
+ with self.assertRaises(ValueError) as err:
+ sbd.SBDTimeout.get_sbd_msgwait("/dev/sda1")
+ self.assertEqual("Cannot get sbd msgwait for /dev/sda1", str(err.exception))
+ mock_run.assert_called_once_with("sbd -d /dev/sda1 dump")
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_get_sbd_msgwait(self, mock_run):
+ mock_run.return_value = """
+ Timeout (loop) : 1
+ Timeout (msgwait) : 10
+ ==Header on disk /dev/sda1 is dumped
+ """
+ res = sbd.SBDTimeout.get_sbd_msgwait("/dev/sda1")
+ assert res == 10
+ mock_run.assert_called_once_with("sbd -d /dev/sda1 dump")
+
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_get_sbd_watchdog_timeout_exception(self, mock_get):
+ mock_get.return_value = None
+ with self.assertRaises(ValueError) as err:
+ sbd.SBDTimeout.get_sbd_watchdog_timeout()
+ self.assertEqual("Cannot get the value of SBD_WATCHDOG_TIMEOUT", str(err.exception))
+ mock_get.assert_called_once_with("SBD_WATCHDOG_TIMEOUT")
+
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_get_sbd_watchdog_timeout(self, mock_get):
+ mock_get.return_value = 5
+ res = sbd.SBDTimeout.get_sbd_watchdog_timeout()
+ assert res == 5
+ mock_get.assert_called_once_with("SBD_WATCHDOG_TIMEOUT")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_get_stonith_watchdog_timeout_return(self, mock_active):
+ mock_active.return_value = False
+ res = sbd.SBDTimeout.get_stonith_watchdog_timeout()
+ assert res == sbd.SBDTimeout.STONITH_WATCHDOG_TIMEOUT_DEFAULT
+ mock_active.assert_called_once_with("pacemaker.service")
+
+ @mock.patch('crmsh.utils.get_property')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ def test_get_stonith_watchdog_timeout(self, mock_active, mock_get_property):
+ mock_active.return_value = True
+ mock_get_property.return_value = "60s"
+ res = sbd.SBDTimeout.get_stonith_watchdog_timeout()
+ assert res == 60
+ mock_active.assert_called_once_with("pacemaker.service")
+
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ @mock.patch('crmsh.utils.detect_virt')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_sbd_delay_start_expected')
+ @mock.patch('crmsh.utils.get_pcmk_delay_max')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_sbd_msgwait')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_device_from_config')
+ @mock.patch('crmsh.utils.is_2node_cluster_without_qdevice')
+ def test_load_configurations(self, mock_2node, mock_get_sbd_dev, mock_get_msgwait, mock_pcmk_delay, mock_delay_expected, mock_detect, mock_get_sbd_value, mock_debug):
+ mock_2node.return_value = True
+ mock_debug.return_value = False
+ mock_get_sbd_value.return_value = "no"
+ mock_get_sbd_dev.return_value = ["/dev/sda1"]
+ mock_get_msgwait.return_value = 30
+ mock_pcmk_delay.return_value = 30
+
+ self.sbd_timeout_inst._load_configurations()
+
+ mock_2node.assert_called_once_with()
+ mock_get_sbd_dev.assert_called_once_with()
+ mock_get_msgwait.assert_called_once_with("/dev/sda1")
+ mock_pcmk_delay.assert_called_once_with(True)
+
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ @mock.patch('crmsh.utils.detect_virt')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_sbd_delay_start_expected')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_stonith_watchdog_timeout')
+ @mock.patch('crmsh.sbd.SBDTimeout.get_sbd_watchdog_timeout')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_device_from_config')
+ @mock.patch('crmsh.utils.is_2node_cluster_without_qdevice')
+ def test_load_configurations_diskless(self, mock_2node, mock_get_sbd_dev, mock_get_watchdog_timeout, mock_get_stonith_watchdog_timeout, mock_delay_expected, mock_detect, mock_get_sbd_value, mock_debug):
+ mock_2node.return_value = True
+ mock_debug.return_value = False
+ mock_get_sbd_value.return_value = "no"
+ mock_get_sbd_dev.return_value = []
+ mock_get_watchdog_timeout.return_value = 30
+ mock_get_stonith_watchdog_timeout.return_value = 30
+
+ self.sbd_timeout_inst._load_configurations()
+
+ mock_2node.assert_called_once_with()
+ mock_get_sbd_dev.assert_called_once_with()
+ mock_get_watchdog_timeout.assert_called_once_with()
+ mock_get_stonith_watchdog_timeout.assert_called_once_with()
+
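+ # get_stonith_timeout_expected() folds the SBD fencing window together
+ # with the corosync token+consensus time (mocked to 11s here); the totals
+ # asserted below (83 disk-based, 71 diskless) presumably follow from that
+ # arithmetic.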
+ @mock.patch('crmsh.corosync.token_and_consensus_timeout')
+ @mock.patch('logging.Logger.debug')
+ def test_get_stonith_timeout_expected(self, mock_debug, mock_general):
+ self.sbd_timeout_inst.disk_based = True
+ self.sbd_timeout_inst.pcmk_delay_max = 30
+ self.sbd_timeout_inst.msgwait = 30
+ mock_general.return_value = 11
+ res = self.sbd_timeout_inst.get_stonith_timeout_expected()
+ assert res == 83
+
+ @mock.patch('crmsh.corosync.token_and_consensus_timeout')
+ @mock.patch('logging.Logger.debug')
+ def test_get_stonith_timeout_expected_diskless(self, mock_debug, mock_general):
+ self.sbd_timeout_inst.disk_based = False
+ self.sbd_timeout_inst.stonith_watchdog_timeout = -1
+ self.sbd_timeout_inst.sbd_watchdog_timeout = 15
+ mock_general.return_value = 11
+ res = self.sbd_timeout_inst.get_stonith_timeout_expected()
+ assert res == 71
+
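+ # get_sbd_delay_start_expected() adds the corosync token+consensus time
+ # (mocked to 30s) on top of the SBD window; with these inputs both the
+ # disk-based and diskless variants land on 90 seconds.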
+ @mock.patch('crmsh.corosync.token_and_consensus_timeout')
+ def test_get_sbd_delay_start_expected(self, mock_corosync):
+ mock_corosync.return_value = 30
+ self.sbd_timeout_inst.disk_based = True
+ self.sbd_timeout_inst.pcmk_delay_max = 30
+ self.sbd_timeout_inst.msgwait = 30
+ res = self.sbd_timeout_inst.get_sbd_delay_start_expected()
+ assert res == 90
+
+ @mock.patch('crmsh.corosync.token_and_consensus_timeout')
+ def test_get_sbd_delay_start_expected_diskless(self, mock_corosync):
+ mock_corosync.return_value = 30
+ self.sbd_timeout_inst.disk_based = False
+ self.sbd_timeout_inst.sbd_watchdog_timeout = 30
+ res = self.sbd_timeout_inst.get_sbd_delay_start_expected()
+ assert res == 90
+
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_is_sbd_delay_start(self, mock_get_sbd_value):
+ mock_get_sbd_value.return_value = "100"
+ assert sbd.SBDTimeout.is_sbd_delay_start() is True
+ mock_get_sbd_value.assert_called_once_with("SBD_DELAY_START")
+
+ @mock.patch('crmsh.sbd.SBDManager.update_configuration')
+ def test_adjust_sbd_delay_start_return(self, mock_update):
+ self.sbd_timeout_inst.sbd_delay_start_value_expected = 100
+ self.sbd_timeout_inst.sbd_delay_start_value_from_config = "100"
+ self.sbd_timeout_inst.adjust_sbd_delay_start()
+ mock_update.assert_not_called()
+
+ @mock.patch('crmsh.sbd.SBDManager.update_configuration')
+ def test_adjust_sbd_delay_start(self, mock_update):
+ self.sbd_timeout_inst.sbd_delay_start_value_expected = 100
+ self.sbd_timeout_inst.sbd_delay_start_value_from_config = "no"
+ self.sbd_timeout_inst.adjust_sbd_delay_start()
+ mock_update.assert_called_once_with({"SBD_DELAY_START": "100"})
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_adjust_systemd_start_timeout_no_delay_start_no(self, mock_get_sbd_value, mock_run):
+ mock_get_sbd_value.return_value = "no"
+ self.sbd_timeout_inst.adjust_systemd_start_timeout()
+ mock_run.assert_not_called()
+
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.utils.get_systemd_timeout_start_in_sec')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_adjust_systemd_start_timeout_no_delay_start_return(self, mock_get_sbd_value, mock_run, mock_get_systemd_sec, mock_mkdirp):
+ mock_get_sbd_value.return_value = "10"
+ mock_run.return_value = "1min 30s"
+ mock_get_systemd_sec.return_value = 90
+ self.sbd_timeout_inst.adjust_systemd_start_timeout()
+ mock_run.assert_called_once_with("systemctl show -p TimeoutStartUSec sbd --value")
+ mock_get_systemd_sec.assert_called_once_with("1min 30s")
+ mock_mkdirp.assert_not_called()
+
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.utils.str2file')
+ @mock.patch('crmsh.utils.mkdirp')
+ @mock.patch('crmsh.utils.get_systemd_timeout_start_in_sec')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_adjust_systemd_start_timeout_no_delay_start(self, mock_get_sbd_value, mock_run, mock_get_systemd_sec, mock_mkdirp, mock_str2file, mock_csync2, mock_cluster_run):
+ mock_get_sbd_value.return_value = "100"
+ mock_run.return_value = "1min 30s"
+ mock_get_systemd_sec.return_value = 90
+ self.sbd_timeout_inst.adjust_systemd_start_timeout()
+ mock_run.assert_called_once_with("systemctl show -p TimeoutStartUSec sbd --value")
+ mock_get_systemd_sec.assert_called_once_with("1min 30s")
+ mock_mkdirp.assert_called_once_with(bootstrap.SBD_SYSTEMD_DELAY_START_DIR)
+ mock_str2file.assert_called_once_with('[Service]\nTimeoutSec=120', '/etc/systemd/system/sbd.service.d/sbd_delay_start.conf')
+ mock_csync2.assert_called_once_with(bootstrap.SBD_SYSTEMD_DELAY_START_DIR)
+ mock_cluster_run.assert_called_once_with("systemctl daemon-reload")
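+ # The drop-in is written because the current 90s start timeout is shorter
+ # than SBD_DELAY_START=100, and TimeoutSec=120 matches
+ # int(1.2 * SBD_DELAY_START) (both inferred from this test and the
+ # _return variant above).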
+
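+ # SBD_DELAY_START handling, as exercised below: "yes" appears to fall back
+ # to 2 * SBD_WATCHDOG_TIMEOUT, while a numeric value is taken as-is.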
+ @mock.patch('crmsh.sbd.SBDTimeout.get_sbd_watchdog_timeout')
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_get_sbd_delay_start_sec_from_sysconfig_yes(self, mock_get_sbd_value, mock_get_sbd_timeout):
+ mock_get_sbd_value.return_value = "yes"
+ mock_get_sbd_timeout.return_value = 30
+ assert sbd.SBDTimeout.get_sbd_delay_start_sec_from_sysconfig() == 60
+ mock_get_sbd_value.assert_called_once_with("SBD_DELAY_START")
+
+ @mock.patch('crmsh.sbd.SBDManager.get_sbd_value_from_config')
+ def test_get_sbd_delay_start_sec_from_sysconfig(self, mock_get_sbd_value):
+ mock_get_sbd_value.return_value = "30"
+ assert sbd.SBDTimeout.get_sbd_delay_start_sec_from_sysconfig() == 30
+ mock_get_sbd_value.assert_called_once_with("SBD_DELAY_START")
+
+
+class TestSBDManager(unittest.TestCase):
+ """
+ Unit tests for crmsh.sbd.SBDManager
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.sbd_inst = sbd.SBDManager(mock.Mock(sbd_devices=["/dev/sdb1", "/dev/sdc1"], diskless_sbd=False))
+ self.sbd_inst_devices_gt_3 = sbd.SBDManager(mock.Mock(sbd_devices=["/dev/sdb1", "/dev/sdc1", "/dev/sdd1", "/dev/sde1"]))
+ self.sbd_inst_interactive = sbd.SBDManager(mock.Mock(sbd_devices=[], diskless_sbd=False))
+ self.sbd_inst_diskless = sbd.SBDManager(mock.Mock(sbd_devices=[], diskless_sbd=True))
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('logging.Logger.warning')
+ def test_get_sbd_device_interactive_yes_to_all(self, mock_warn):
+ self.sbd_inst._context = mock.Mock(yes_to_all=True)
+ self.sbd_inst._get_sbd_device_interactive()
+ mock_warn.assert_called_once_with(sbd.SBDManager.SBD_WARNING)
+
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('logging.Logger.warning')
+ def test_get_sbd_device_interactive_not_confirm(self, mock_warn, mock_status, mock_confirm):
+ self.sbd_inst._context.yes_to_all = False
+ mock_confirm.return_value = False
+ self.sbd_inst._get_sbd_device_interactive()
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_warn.assert_called_once_with("Not configuring SBD - STONITH will be disabled.")
+
+ @mock.patch('crmsh.sbd.SBDManager._no_overwrite_check')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ def test_get_sbd_device_interactive_already_configured(self, mock_status, mock_confirm, mock_from_config, mock_no_overwrite):
+ self.sbd_inst._context = mock.Mock(yes_to_all=False)
+ mock_confirm.return_value = True
+ mock_from_config.return_value = ["/dev/sda1"]
+ mock_no_overwrite.return_value = True
+
+ res = self.sbd_inst._get_sbd_device_interactive()
+ self.assertEqual(res, ["/dev/sda1"])
+
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_confirm.assert_has_calls([
+ mock.call("Do you wish to use SBD?"),
+ ])
+ mock_from_config.assert_called_once_with()
+
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ @mock.patch('crmsh.sbd.SBDManager._no_overwrite_check')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ def test_get_sbd_device_interactive_diskless(self, mock_status, mock_confirm, mock_from_config, mock_no_overwrite, mock_prompt):
+ self.sbd_inst._context = mock.Mock(yes_to_all=False)
+ mock_confirm.return_value = True
+ mock_no_overwrite.return_value = False
+ mock_from_config.return_value = []
+ mock_prompt.return_value = "none"
+
+ self.sbd_inst._get_sbd_device_interactive()
+
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_from_config.assert_called_once_with()
+ mock_prompt.assert_called_once_with('Path to storage device (e.g. /dev/disk/by-id/...), or "none" for diskless sbd, use ";" as separator for multi path', 'none|\\/.*')
+
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ @mock.patch('crmsh.sbd.SBDManager._no_overwrite_check')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ def test_get_sbd_device_interactive_null_and_diskless(self, mock_status, mock_confirm, mock_from_config, mock_no_overwrite, mock_prompt):
+ self.sbd_inst._context = mock.Mock(yes_to_all=False)
+ mock_confirm.return_value = True
+ mock_no_overwrite.return_value = False
+ mock_from_config.return_value = []
+ mock_prompt.return_value = "none"
+
+ self.sbd_inst._get_sbd_device_interactive()
+
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_confirm.assert_called_once_with("Do you wish to use SBD?")
+ mock_from_config.assert_called_once_with()
+ mock_prompt.assert_has_calls([
+ mock.call('Path to storage device (e.g. /dev/disk/by-id/...), or "none" for diskless sbd, use ";" as separator for multi path', 'none|\\/.*')
+ ])
+
+ @mock.patch('crmsh.utils.re_split_string')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('logging.Logger.error')
+ @mock.patch('crmsh.sbd.SBDManager._verify_sbd_device')
+ @mock.patch('crmsh.bootstrap.prompt_for_string')
+ @mock.patch('crmsh.sbd.SBDManager._no_overwrite_check')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.confirm')
+ @mock.patch('logging.Logger.info')
+ def test_get_sbd_device_interactive(self, mock_status, mock_confirm, mock_from_config, mock_no_overwrite, mock_prompt, mock_verify, mock_error_msg, mock_warn, mock_split):
+ self.sbd_inst._context = mock.Mock(yes_to_all=False)
+ mock_confirm.side_effect = [True, False, True]
+ mock_from_config.return_value = []
+ mock_no_overwrite.return_value = False
+ mock_prompt.side_effect = ["/dev/test1", "/dev/sda1", "/dev/sdb1"]
+ mock_split.side_effect = [["/dev/test1"], ["/dev/sda1"], ["/dev/sdb1"]]
+ mock_verify.side_effect = [ValueError("/dev/test1 error"), None, None]
+
+ res = self.sbd_inst._get_sbd_device_interactive()
+ self.assertEqual(res, ["/dev/sdb1"])
+
+ mock_status.assert_called_once_with(sbd.SBDManager.SBD_STATUS_DESCRIPTION)
+ mock_confirm.assert_has_calls([
+ mock.call("Do you wish to use SBD?"),
+ mock.call("Are you sure you wish to use this device?")
+ ])
+ mock_from_config.assert_called_once_with()
+ mock_error_msg.assert_called_once_with("/dev/test1 error")
+ mock_warn.assert_has_calls([
+ mock.call("All data on /dev/sda1 will be destroyed!"),
+ mock.call("All data on /dev/sdb1 will be destroyed!")
+ ])
+ mock_prompt.assert_has_calls([
+ mock.call('Path to storage device (e.g. /dev/disk/by-id/...), or "none" for diskless sbd, use ";" as separator for multi path', 'none|\\/.*') for x in range(3)
+ ])
+ mock_split.assert_has_calls([
+ mock.call(sbd.SBDManager.PARSE_RE, "/dev/test1"),
+ mock.call(sbd.SBDManager.PARSE_RE, "/dev/sda1"),
+ mock.call(sbd.SBDManager.PARSE_RE, "/dev/sdb1"),
+ ])
+
+ def test_verify_sbd_device_gt_3(self):
+ assert self.sbd_inst_devices_gt_3.sbd_devices_input == ["/dev/sdb1", "/dev/sdc1", "/dev/sdd1", "/dev/sde1"]
+ dev_list = self.sbd_inst_devices_gt_3.sbd_devices_input
+ with self.assertRaises(ValueError) as err:
+ self.sbd_inst_devices_gt_3._verify_sbd_device(dev_list)
+ self.assertEqual("Maximum number of SBD device is 3", str(err.exception))
+
+ @mock.patch('crmsh.sbd.SBDManager._compare_device_uuid')
+ @mock.patch('crmsh.utils.is_block_device')
+ def test_verify_sbd_device_not_block(self, mock_block_device, mock_compare):
+ assert self.sbd_inst.sbd_devices_input == ["/dev/sdb1", "/dev/sdc1"]
+ dev_list = self.sbd_inst.sbd_devices_input
+ mock_block_device.side_effect = [True, False]
+
+ with self.assertRaises(ValueError) as err:
+ self.sbd_inst._verify_sbd_device(dev_list)
+ self.assertEqual("/dev/sdc1 doesn't look like a block device", str(err.exception))
+
+ mock_block_device.assert_has_calls([mock.call("/dev/sdb1"), mock.call("/dev/sdc1")])
+ mock_compare.assert_called_once_with("/dev/sdb1", [])
+
+ @mock.patch('crmsh.sbd.SBDManager._verify_sbd_device')
+ def test_get_sbd_device_from_option(self, mock_verify):
+ self.sbd_inst._get_sbd_device()
+ mock_verify.assert_called_once_with(['/dev/sdb1', '/dev/sdc1'])
+
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_interactive')
+ def test_get_sbd_device_from_interactive(self, mock_interactive):
+ mock_interactive.return_value = ["/dev/sdb1", "/dev/sdc1"]
+ self.sbd_inst_interactive._get_sbd_device()
+ mock_interactive.assert_called_once_with()
+
+ def test_get_sbd_device_diskless(self):
+ self.sbd_inst_diskless._get_sbd_device()
+
+ @mock.patch('crmsh.sbd.SBDTimeout')
+ @mock.patch('logging.Logger.info')
+ def test_initialize_sbd_return(self, mock_info, mock_sbd_timeout):
+ mock_inst = mock.Mock()
+ mock_sbd_timeout.return_value = mock_inst
+ self.sbd_inst_diskless._context = mock.Mock(profiles_dict={})
+ self.sbd_inst_diskless._initialize_sbd()
+ mock_info.assert_called_once_with("Configuring diskless SBD")
+ mock_inst.initialize_timeout.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.sbd.SBDTimeout')
+ @mock.patch('logging.Logger.info')
+ def test_initialize_sbd(self, mock_info, mock_sbd_timeout, mock_invoke, mock_error):
+ mock_inst = mock.Mock(sbd_msgwait=10, sbd_watchdog_timeout=5)
+ mock_sbd_timeout.return_value = mock_inst
+ mock_inst.set_sbd_watchdog_timeout = mock.Mock()
+ mock_inst.set_sbd_msgwait = mock.Mock()
+ self.sbd_inst._sbd_devices = ["/dev/sdb1", "/dev/sdc1"]
+ mock_invoke.side_effect = [(True, None, None), (False, None, "error")]
+ mock_error.side_effect = ValueError
+
+ with self.assertRaises(ValueError):
+ self.sbd_inst._initialize_sbd()
+
+ mock_invoke.assert_has_calls([
+ mock.call("sbd -4 10 -1 5 -d /dev/sdb1 create"),
+ mock.call("sbd -4 10 -1 5 -d /dev/sdc1 create")
+ ])
+ mock_error.assert_called_once_with("Failed to initialize SBD device /dev/sdc1: error")
+
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.utils.sysconfig_set')
+ @mock.patch('shutil.copyfile')
+ def test_update_configuration(self, mock_copy, mock_sysconfig, mock_update):
+ self.sbd_inst._sbd_devices = ["/dev/sdb1", "/dev/sdc1"]
+ self.sbd_inst._watchdog_inst = mock.Mock(watchdog_device_name="/dev/watchdog")
+ self.sbd_inst.timeout_inst = mock.Mock(sbd_watchdog_timeout=15)
+
+ self.sbd_inst._update_sbd_configuration()
+
+ mock_copy.assert_called_once_with("/usr/share/fillup-templates/sysconfig.sbd", "/etc/sysconfig/sbd")
+ mock_sysconfig.assert_called_once_with("/etc/sysconfig/sbd", SBD_WATCHDOG_DEV='/dev/watchdog', SBD_DEVICE='/dev/sdb1;/dev/sdc1', SBD_WATCHDOG_TIMEOUT="15")
+ mock_update.assert_called_once_with("/etc/sysconfig/sbd")
+
+ @mock.patch('crmsh.bootstrap.utils.parse_sysconfig')
+ def test_get_sbd_device_from_config_none(self, mock_parse):
+ mock_parse_inst = mock.Mock()
+ mock_parse.return_value = mock_parse_inst
+ mock_parse_inst.get.return_value = None
+
+ res = self.sbd_inst._get_sbd_device_from_config()
+ assert res == []
+
+ mock_parse.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_parse_inst.get.assert_called_once_with("SBD_DEVICE")
+
+ @mock.patch('crmsh.utils.re_split_string')
+ @mock.patch('crmsh.bootstrap.utils.parse_sysconfig')
+ def test_get_sbd_device_from_config(self, mock_parse, mock_split):
+ mock_parse_inst = mock.Mock()
+ mock_parse.return_value = mock_parse_inst
+ mock_parse_inst.get.return_value = "/dev/sdb1;/dev/sdc1"
+ mock_split.return_value = ["/dev/sdb1", "/dev/sdc1"]
+
+ res = self.sbd_inst._get_sbd_device_from_config()
+ assert res == ["/dev/sdb1", "/dev/sdc1"]
+
+ mock_parse.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_parse_inst.get.assert_called_once_with("SBD_DEVICE")
+ mock_split.assert_called_once_with(sbd.SBDManager.PARSE_RE, "/dev/sdb1;/dev/sdc1")
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.get_quorum_votes_dict')
+ def test_warn_diskless_sbd_diskless(self, mock_vote, mock_warn):
+ self.sbd_inst_diskless._context = mock.Mock(cluster_is_running=False)
+ self.sbd_inst_diskless._warn_diskless_sbd()
+ mock_vote.assert_not_called()
+ mock_warn.assert_called_once_with(sbd.SBDManager.DISKLESS_SBD_WARNING)
+
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.get_quorum_votes_dict')
+ def test_warn_diskless_sbd_peer(self, mock_vote, mock_warn):
+ mock_vote.return_value = {'Expected': '1'}
+ self.sbd_inst_diskless._warn_diskless_sbd("node2")
+ mock_vote.assert_called_once_with("node2")
+ mock_warn.assert_called_once_with(sbd.SBDManager.DISKLESS_SBD_WARNING)
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_sbd_init_not_installed(self, mock_package):
+ mock_package.return_value = False
+ self.sbd_inst.sbd_init()
+ mock_package.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.sbd.SBDManager._update_sbd_configuration')
+ @mock.patch('crmsh.sbd.SBDManager._initialize_sbd')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device')
+ @mock.patch('crmsh.watchdog.Watchdog')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_sbd_init_return(self, mock_package, mock_watchdog, mock_get_device, mock_initialize, mock_update, mock_invoke):
+ mock_package.return_value = True
+ self.sbd_inst._sbd_devices = None
+ self.sbd_inst.diskless_sbd = False
+ self.sbd_inst._context = mock.Mock(watchdog=None)
+ mock_watchdog_inst = mock.Mock()
+ mock_watchdog.return_value = mock_watchdog_inst
+ mock_watchdog_inst.init_watchdog = mock.Mock()
+
+ self.sbd_inst.sbd_init()
+
+ mock_package.assert_called_once_with("sbd")
+ mock_get_device.assert_called_once_with()
+ mock_initialize.assert_not_called()
+ mock_update.assert_not_called()
+ mock_watchdog.assert_called_once_with(_input=None)
+ mock_watchdog_inst.init_watchdog.assert_called_once_with()
+ mock_invoke.assert_called_once_with("systemctl disable sbd.service")
+
+ @mock.patch('crmsh.sbd.SBDManager._enable_sbd_service')
+ @mock.patch('crmsh.sbd.SBDManager._warn_diskless_sbd')
+ @mock.patch('crmsh.sbd.SBDManager._update_sbd_configuration')
+ @mock.patch('crmsh.sbd.SBDManager._initialize_sbd')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device')
+ @mock.patch('crmsh.watchdog.Watchdog')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_sbd_init(self, mock_package, mock_watchdog, mock_get_device, mock_initialize, mock_update, mock_warn, mock_enable_sbd):
+ mock_package.return_value = True
+ self.sbd_inst_diskless._context = mock.Mock(watchdog=None)
+ mock_watchdog_inst = mock.Mock()
+ mock_watchdog.return_value = mock_watchdog_inst
+ mock_watchdog_inst.init_watchdog = mock.Mock()
+ self.sbd_inst_diskless.sbd_init()
+
+ mock_package.assert_called_once_with("sbd")
+ mock_get_device.assert_called_once_with()
+ mock_initialize.assert_called_once_with()
+ mock_update.assert_called_once_with()
+ mock_watchdog.assert_called_once_with(_input=None)
+ mock_watchdog_inst.init_watchdog.assert_called_once_with()
+ mock_warn.assert_called_once_with()
+ mock_enable_sbd.assert_called_once_with()
+
+ @mock.patch('crmsh.sbd.SBDManager.configure_sbd_resource_and_properties')
+ @mock.patch('crmsh.bootstrap.wait_for_cluster')
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+ def test_restart_cluster_on_needed_no_ra_running(self, mock_parser, mock_status, mock_cluster_run, mock_wait, mock_config_sbd_ra):
+ mock_parser().is_any_resource_running.return_value = False
+ self.sbd_inst._restart_cluster_and_configure_sbd_ra()
+ mock_status.assert_called_once_with("Restarting cluster service")
+ mock_cluster_run.assert_called_once_with("crm cluster restart")
+ mock_wait.assert_called_once_with()
+ mock_config_sbd_ra.assert_called_once_with()
+
+ @mock.patch('crmsh.sbd.SBDTimeout.get_stonith_timeout')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+ def test_restart_cluster_on_needed_diskless(self, mock_parser, mock_warn, mock_get_timeout):
+ mock_parser().is_any_resource_running.return_value = True
+ mock_get_timeout.return_value = 60
+ self.sbd_inst_diskless.timeout_inst = mock.Mock(stonith_watchdog_timeout=-1)
+ self.sbd_inst_diskless._restart_cluster_and_configure_sbd_ra()
+ mock_warn.assert_has_calls([
+ mock.call("To start sbd.service, need to restart cluster service manually on each node"),
+ mock.call("Then run \"crm configure property stonith-enabled=true stonith-watchdog-timeout=-1 stonith-timeout=60\" on any node")
+ ])
+
+ @mock.patch('crmsh.sbd.SBDManager.configure_sbd_resource_and_properties')
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+ def test_restart_cluster_on_needed(self, mock_parser, mock_warn, mock_config_sbd_ra):
+ mock_parser().is_any_resource_running.return_value = True
+ self.sbd_inst._restart_cluster_and_configure_sbd_ra()
+ mock_warn.assert_has_calls([
+ mock.call("To start sbd.service, need to restart cluster service manually on each node"),
+ ])
+
+ @mock.patch('crmsh.bootstrap.invoke')
+ def test_enable_sbd_service_init(self, mock_invoke):
+ self.sbd_inst._context = mock.Mock(cluster_is_running=False)
+ self.sbd_inst._enable_sbd_service()
+ mock_invoke.assert_called_once_with("systemctl enable sbd.service")
+
+ @mock.patch('crmsh.sbd.SBDManager._restart_cluster_and_configure_sbd_ra')
+ @mock.patch('crmsh.utils.cluster_run_cmd')
+ def test_enable_sbd_service_restart(self, mock_cluster_run, mock_restart):
+ self.sbd_inst._context = mock.Mock(cluster_is_running=True)
+ self.sbd_inst._enable_sbd_service()
+ mock_cluster_run.assert_has_calls([
+ mock.call("systemctl enable sbd.service"),
+ ])
+ mock_restart.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_configure_sbd_resource_and_properties_not_installed(self, mock_package):
+ mock_package.return_value = False
+ self.sbd_inst.configure_sbd_resource_and_properties()
+ mock_package.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.sbd.SBDTimeout.adjust_sbd_timeout_related_cluster_configuration')
+ @mock.patch('crmsh.utils.set_property')
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_configure_sbd_resource_and_properties(
+ self,
+ mock_package, mock_enabled, mock_parser, mock_run, mock_set_property, sbd_adjust, mock_is_active,
+ ):
+ mock_package.return_value = True
+ mock_enabled.return_value = True
+ mock_parser().is_resource_configured.return_value = False
+ mock_is_active.return_value = False
+ self.sbd_inst._context = mock.Mock(cluster_is_running=True)
+ self.sbd_inst._get_sbd_device_from_config = mock.Mock()
+ self.sbd_inst._get_sbd_device_from_config.return_value = ["/dev/sda1"]
+
+ self.sbd_inst.configure_sbd_resource_and_properties()
+
+ mock_package.assert_called_once_with("sbd")
+ mock_enabled.assert_called_once_with("sbd.service")
+ mock_run.assert_called_once_with("crm configure primitive {} {}".format(sbd.SBDManager.SBD_RA_ID, sbd.SBDManager.SBD_RA))
+ mock_set_property.assert_called_once_with("stonith-enabled", "true")
+
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_join_sbd_config_not_installed(self, mock_package):
+ mock_package.return_value = False
+ self.sbd_inst.join_sbd("alice", "node1")
+ mock_package.assert_called_once_with("sbd")
+
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_join_sbd_config_not_exist(self, mock_package, mock_exists, mock_invoke):
+ mock_package.return_value = True
+ mock_exists.return_value = False
+ self.sbd_inst.join_sbd("alice", "node1")
+ mock_package.assert_called_once_with("sbd")
+ mock_exists.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_invoke.assert_called_once_with("systemctl disable sbd.service")
+
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_join_sbd_config_disabled(self, mock_package, mock_exists, mock_enabled, mock_invoke):
+ mock_package.return_value = True
+ mock_exists.return_value = True
+ mock_enabled.return_value = False
+
+ self.sbd_inst.join_sbd("alice", "node1")
+
+ mock_package.assert_called_once_with("sbd")
+ mock_exists.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_invoke.assert_called_once_with("systemctl disable sbd.service")
+ mock_enabled.assert_called_once_with("sbd.service", "node1")
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.sbd.SBDManager._verify_sbd_device')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.watchdog.Watchdog')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_join_sbd(self, mock_package, mock_exists, mock_enabled, mock_invoke, mock_watchdog, mock_get_device, mock_verify, mock_status):
+ mock_package.return_value = True
+ mock_exists.return_value = True
+ mock_enabled.return_value = True
+ mock_get_device.return_value = ["/dev/sdb1"]
+ mock_watchdog_inst = mock.Mock()
+ mock_watchdog.return_value = mock_watchdog_inst
+ mock_watchdog_inst.join_watchdog = mock.Mock()
+
+ self.sbd_inst.join_sbd("alice", "node1")
+
+ mock_package.assert_called_once_with("sbd")
+ mock_exists.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_invoke.assert_called_once_with("systemctl enable sbd.service")
+ mock_get_device.assert_called_once_with()
+ mock_verify.assert_called_once_with(["/dev/sdb1"], ["node1"])
+ mock_enabled.assert_called_once_with("sbd.service", "node1")
+ mock_status.assert_called_once_with("Got SBD configuration")
+ mock_watchdog.assert_called_once_with(remote_user="alice", peer_host="node1")
+ mock_watchdog_inst.join_watchdog.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.sysconfig_set')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.sbd.SBDManager._warn_diskless_sbd')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.watchdog.Watchdog')
+ @mock.patch('crmsh.bootstrap.invoke')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
+ @mock.patch('os.path.exists')
+ @mock.patch('crmsh.utils.package_is_installed')
+ def test_join_sbd_diskless(self, mock_package, mock_exists, mock_enabled, mock_invoke, mock_watchdog, mock_get_device, mock_warn, mock_status, mock_set):
+ mock_package.return_value = True
+ mock_exists.return_value = True
+ mock_enabled.return_value = True
+ mock_get_device.return_value = []
+ mock_watchdog_inst = mock.Mock()
+ mock_watchdog.return_value = mock_watchdog_inst
+ mock_watchdog_inst.join_watchdog = mock.Mock()
+
+ self.sbd_inst.join_sbd("alice", "node1")
+
+ mock_package.assert_called_once_with("sbd")
+ mock_exists.assert_called_once_with("/etc/sysconfig/sbd")
+ mock_invoke.assert_called_once_with("systemctl enable sbd.service")
+ mock_get_device.assert_called_once_with()
+ mock_warn.assert_called_once_with("node1")
+ mock_enabled.assert_called_once_with("sbd.service", "node1")
+ mock_status.assert_called_once_with("Got diskless SBD configuration")
+ mock_watchdog.assert_called_once_with(remote_user="alice", peer_host="node1")
+ mock_watchdog_inst.join_watchdog.assert_called_once_with()
+
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ def test_verify_sbd_device_classmethod_exception(self, mock_get_config):
+ mock_get_config.return_value = []
+ with self.assertRaises(ValueError) as err:
+ sbd.SBDManager.verify_sbd_device()
+ self.assertEqual("No sbd device configured", str(err.exception))
+ mock_get_config.assert_called_once_with()
+
+ @mock.patch('crmsh.sbd.SBDManager._verify_sbd_device')
+ @mock.patch('crmsh.utils.list_cluster_nodes_except_me')
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ def test_verify_sbd_device_classmethod(self, mock_get_config, mock_list_nodes, mock_verify):
+ mock_get_config.return_value = ["/dev/sda1"]
+ mock_list_nodes.return_value = ["node1"]
+ sbd.SBDManager.verify_sbd_device()
+ mock_get_config.assert_called_once_with()
+ mock_verify.assert_called_once_with(["/dev/sda1"], ["node1"])
+
+ @mock.patch('crmsh.sbd.SBDManager._get_device_uuid')
+ def test_compare_device_uuid_return(self, mock_get_uuid):
+ self.sbd_inst._compare_device_uuid("/dev/sdb1", None)
+ mock_get_uuid.assert_not_called()
+
+ @mock.patch('crmsh.sbd.SBDManager._get_device_uuid')
+ def test_compare_device_uuid(self, mock_get_uuid):
+ mock_get_uuid.side_effect = ["1234", "5678"]
+ with self.assertRaises(ValueError) as err:
+ self.sbd_inst._compare_device_uuid("/dev/sdb1", ["node1"])
+ self.assertEqual("Device /dev/sdb1 doesn't have the same UUID with node1", str(err.exception))
+ mock_get_uuid.assert_has_calls([mock.call("/dev/sdb1"), mock.call("/dev/sdb1", "node1")])
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_get_device_uuid_not_match(self, mock_run):
+ mock_run.return_value = "data"
+ with self.assertRaises(ValueError) as err:
+ self.sbd_inst._get_device_uuid("/dev/sdb1")
+ self.assertEqual("Cannot find sbd device UUID for /dev/sdb1", str(err.exception))
+ mock_run.assert_called_once_with("sbd -d /dev/sdb1 dump", None)
+
+ @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+ def test_get_device_uuid(self, mock_run):
+ output = """
+ ==Dumping header on disk /dev/sda1
+ Header version : 2.1
+ UUID : a2e9a92c-cc72-4ef9-ac55-ccc342f3546b
+ Number of slots : 255
+ Sector size : 512
+ Timeout (watchdog) : 5
+ Timeout (allocate) : 2
+ Timeout (loop) : 1
+ Timeout (msgwait) : 10
+ ==Header on disk /dev/sda1 is dumped
+ """
+ mock_run.return_value = output
+ res = self.sbd_inst._get_device_uuid("/dev/sda1", node="node1")
+ self.assertEqual(res, "a2e9a92c-cc72-4ef9-ac55-ccc342f3546b")
+ mock_run.assert_called_once_with("sbd -d /dev/sda1 dump", "node1")
+
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_is_using_diskless_sbd_true(self, mock_context, mock_is_active, mock_get_sbd):
+ context_inst = mock.Mock()
+ mock_context.return_value = context_inst
+ mock_get_sbd.return_value = []
+ mock_is_active.return_value = True
+ assert sbd.SBDManager.is_using_diskless_sbd() is True
+ mock_context.assert_called_once_with()
+ mock_get_sbd.assert_called_once_with()
+ mock_is_active.assert_called_once_with("sbd.service")
+
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_is_using_diskless_sbd_false(self, mock_context, mock_get_sbd):
+ context_inst = mock.Mock()
+ mock_context.return_value = context_inst
+ mock_get_sbd.return_value = ["/dev/sda1"]
+ assert sbd.SBDManager.is_using_diskless_sbd() is False
+ mock_context.assert_called_once_with()
+ mock_get_sbd.assert_called_once_with()
+
+ @mock.patch('crmsh.sbd.SBDManager._get_sbd_device_from_config')
+ @mock.patch('crmsh.bootstrap.Context')
+ def test_get_sbd_device_from_config_classmethod(self, mock_context, mock_get_sbd):
+ context_inst = mock.Mock()
+ mock_context.return_value = context_inst
+ mock_get_sbd.return_value = ["/dev/sda1"]
+ assert sbd.SBDManager.get_sbd_device_from_config() == ["/dev/sda1"]
+ mock_context.assert_called_once_with()
+ mock_get_sbd.assert_called_once_with()
+
+ @mock.patch('crmsh.bootstrap.sync_file')
+ @mock.patch('crmsh.utils.sysconfig_set')
+ def test_update_configuration_static(self, mock_config_set, mock_csync2):
+ sbd_config_dict = {
+ "SBD_PACEMAKER": "yes",
+ "SBD_STARTMODE": "always",
+ "SBD_DELAY_START": "no",
+ }
+ self.sbd_inst.update_configuration(sbd_config_dict)
+ mock_config_set.assert_called_once_with(bootstrap.SYSCONFIG_SBD, **sbd_config_dict)
+ mock_csync2.assert_called_once_with(bootstrap.SYSCONFIG_SBD)
diff --git a/test/unittests/test_scripts.py b/test/unittests/test_scripts.py
new file mode 100644
index 0000000..04c74e2
--- /dev/null
+++ b/test/unittests/test_scripts.py
@@ -0,0 +1,914 @@
+from __future__ import print_function
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+
+from builtins import str
+from builtins import object
+from os import path
+from pprint import pprint
+import pytest
+from lxml import etree
+from crmsh import scripts
+from crmsh import ra
+from crmsh import utils
+
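+# Load scripts from the test fixtures directory rather than from the
+# installed script directories.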
+scripts._script_dirs = lambda: [path.join(path.dirname(__file__), 'scripts')]
+
+_apache = '''<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="apache">
+<version>1.0</version>
+
+<longdesc lang="en">
+This is the resource agent for the Apache Web server.
+This resource agent operates both version 1.x and version 2.x Apache
+servers.
+
+The start operation ends with a loop in which monitor is
+repeatedly called to make sure that the server started and that
+it is operational. Hence, if the monitor operation does not
+succeed within the start operation timeout, the apache resource
+will end with an error status.
+
+The monitor operation by default loads the server status page
+which depends on the mod_status module and the corresponding
+configuration file (usually /etc/apache2/mod_status.conf).
+Make sure that the server status page works and that the access
+is allowed *only* from localhost (address 127.0.0.1).
+See the statusurl and testregex attributes for more details.
+
+See also http://httpd.apache.org/
+</longdesc>
+<shortdesc lang="en">Manages an Apache Web server instance</shortdesc>
+
+<parameters>
+<parameter name="configfile" required="0" unique="1">
+<longdesc lang="en">
+The full pathname of the Apache configuration file.
+This file is parsed to provide defaults for various other
+resource agent parameters.
+</longdesc>
+<shortdesc lang="en">configuration file path</shortdesc>
+<content type="string" default="$(detect_default_config)" />
+</parameter>
+
+<parameter name="httpd">
+<longdesc lang="en">
+The full pathname of the httpd binary (optional).
+</longdesc>
+<shortdesc lang="en">httpd binary path</shortdesc>
+<content type="string" default="/usr/sbin/httpd" />
+</parameter>
+
+<parameter name="port" >
+<longdesc lang="en">
+A port number that we can probe for status information
+using the statusurl.
+This will default to the port number found in the
+configuration file, or 80, if none can be found
+in the configuration file.
+
+</longdesc>
+<shortdesc lang="en">httpd port</shortdesc>
+<content type="integer" />
+</parameter>
+
+<parameter name="statusurl">
+<longdesc lang="en">
+The URL to monitor (the apache server status page by default).
+If left unspecified, it will be inferred from
+the apache configuration file.
+
+If you set this, make sure that it succeeds *only* from the
+localhost (127.0.0.1). Otherwise, it may happen that the cluster
+complains about the resource being active on multiple nodes.
+</longdesc>
+<shortdesc lang="en">url name</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="testregex">
+<longdesc lang="en">
+Regular expression to match in the output of statusurl.
+Case insensitive.
+</longdesc>
+<shortdesc lang="en">monitor regular expression</shortdesc>
+<content type="string" default="exists, but impossible to show in a human readable format (try grep testregex)"/>
+</parameter>
+
+<parameter name="client">
+<longdesc lang="en">
+Client to use to query Apache. If not specified, the RA will
+try to find one on the system. Currently, wget and curl are
+supported. For example, you can set this parameter to "curl" if
+you prefer that to wget.
+</longdesc>
+<shortdesc lang="en">http client</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="testurl">
+<longdesc lang="en">
+URL to test. If it does not start with "http", then it's
+considered to be relative to the Listen address.
+</longdesc>
+<shortdesc lang="en">test url</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="testregex10">
+<longdesc lang="en">
+Regular expression to match in the output of testurl.
+Case insensitive.
+</longdesc>
+<shortdesc lang="en">extended monitor regular expression</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="testconffile">
+<longdesc lang="en">
+A file which contains test configuration. Could be useful if
+you have to check more than one web application or in case sensitive
+info should be passed as arguments (passwords). Furthermore,
+using a config file is the only way to specify certain
+parameters.
+
+Please see README.webapps for examples and file description.
+</longdesc>
+<shortdesc lang="en">test configuration file</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="testname">
+<longdesc lang="en">
+Name of the test within the test configuration file.
+</longdesc>
+<shortdesc lang="en">test name</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="options">
+<longdesc lang="en">
+Extra options to apply when starting apache. See man httpd(8).
+</longdesc>
+<shortdesc lang="en">command line options</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="envfiles">
+<longdesc lang="en">
+Files (one or more) which contain extra environment variables.
+If you want to prevent script from reading the default file, set
+this parameter to empty string.
+</longdesc>
+<shortdesc lang="en">environment settings files</shortdesc>
+<content type="string" default="/etc/apache2/envvars"/>
+</parameter>
+
+<parameter name="use_ipv6">
+<longdesc lang="en">
+We will try to detect if the URL (for monitor) is IPv6, but if
+that doesn't work set this to true to enforce IPv6.
+</longdesc>
+<shortdesc lang="en">use ipv6 with http clients</shortdesc>
+<content type="boolean" default="false"/>
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start" timeout="40s" />
+<action name="stop" timeout="60s" />
+<action name="status" timeout="30s" />
+<action name="monitor" depth="0" timeout="20s" interval="10" />
+<action name="meta-data" timeout="5" />
+<action name="validate-all" timeout="5" />
+</actions>
+</resource-agent>
+'''
+
+_virtual_ip = '''<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="IPaddr2">
+<version>1.0</version>
+
+<longdesc lang="en">
+This Linux-specific resource manages IP alias IP addresses.
+It can add an IP alias, or remove one.
+In addition, it can implement Cluster Alias IP functionality
+if invoked as a clone resource.
+
+If used as a clone, you should explicitly set clone-node-max &gt;= 2,
+and/or clone-max &lt; number of nodes. In case of node failure,
+clone instances need to be re-allocated on surviving nodes.
+This would not be possible if there is already an instance on those nodes,
+and clone-node-max=1 (which is the default).
+</longdesc>
+
+<shortdesc lang="en">Manages virtual IPv4 and IPv6 addresses (Linux specific version)</shortdesc>
+
+<parameters>
+<parameter name="ip" unique="1" required="1">
+<longdesc lang="en">
+The IPv4 (dotted quad notation) or IPv6 address (colon hexadecimal notation)
+example IPv4 "192.168.1.1".
+example IPv6 "2001:db8:DC28:0:0:FC57:D4C8:1FFF".
+</longdesc>
+<shortdesc lang="en">IPv4 or IPv6 address</shortdesc>
+<content type="string" default="" />
+</parameter>
+<parameter name="nic" unique="0">
+<longdesc lang="en">
+The base network interface on which the IP address will be brought
+online.
+If left empty, the script will try and determine this from the
+routing table.
+
+Do NOT specify an alias interface in the form eth0:1 or anything here;
+rather, specify the base interface only.
+If you want a label, see the iflabel parameter.
+
+Prerequisite:
+
+There must be at least one static IP address, which is not managed by
+the cluster, assigned to the network interface.
+If you can not assign any static IP address on the interface,
+modify this kernel parameter:
+
+sysctl -w net.ipv4.conf.all.promote_secondaries=1 # (or per device)
+</longdesc>
+<shortdesc lang="en">Network interface</shortdesc>
+<content type="string"/>
+</parameter>
+
+<parameter name="cidr_netmask">
+<longdesc lang="en">
+The netmask for the interface in CIDR format
+(e.g., 24 and not 255.255.255.0)
+
+If unspecified, the script will also try to determine this from the
+routing table.
+</longdesc>
+<shortdesc lang="en">CIDR netmask</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="broadcast">
+<longdesc lang="en">
+Broadcast address associated with the IP. If left empty, the script will
+determine this from the netmask.
+</longdesc>
+<shortdesc lang="en">Broadcast address</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="iflabel">
+<longdesc lang="en">
+You can specify an additional label for your IP address here.
+This label is appended to your interface name.
+
+The kernel allows alphanumeric labels up to a maximum length of 15
+characters including the interface name and colon (e.g. eth0:foobar1234)
+
+A label can be specified in nic parameter but it is deprecated.
+If a label is specified in nic name, this parameter has no effect.
+</longdesc>
+<shortdesc lang="en">Interface label</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="lvs_support">
+<longdesc lang="en">
+Enable support for LVS Direct Routing configurations. In case an IP
+address is stopped, only move it to the loopback device to allow the
+local node to continue to service requests, but no longer advertise it
+on the network.
+
+Notes for IPv6:
+It is not necessary to enable this option on IPv6.
+Instead, enable 'lvs_ipv6_addrlabel' option for LVS-DR usage on IPv6.
+</longdesc>
+<shortdesc lang="en">Enable support for LVS DR</shortdesc>
+<content type="boolean" default="${OCF_RESKEY_lvs_support_default}"/>
+</parameter>
+
+<parameter name="lvs_ipv6_addrlabel">
+<longdesc lang="en">
+Enable adding IPv6 address label so IPv6 traffic originating from
+the address's interface does not use this address as the source.
+This is necessary for LVS-DR health checks to realservers to work. Without it,
+the most recently added IPv6 address (probably the address added by IPaddr2)
+will be used as the source address for IPv6 traffic from that interface and
+since that address exists on loopback on the realservers, the realserver
+response to pings/connections will never leave its loopback.
+See RFC3484 for the detail of the source address selection.
+
+See also 'lvs_ipv6_addrlabel_value' parameter.
+</longdesc>
+<shortdesc lang="en">Enable adding IPv6 address label.</shortdesc>
+<content type="boolean" default="${OCF_RESKEY_lvs_ipv6_addrlabel_default}"/>
+</parameter>
+
+<parameter name="lvs_ipv6_addrlabel_value">
+<longdesc lang="en">
+Specify IPv6 address label value used when 'lvs_ipv6_addrlabel' is enabled.
+The value should be an unused label in the policy table
+which is shown by 'ip addrlabel list' command.
+You would rarely need to change this parameter.
+</longdesc>
+<shortdesc lang="en">IPv6 address label value.</shortdesc>
+<content type="integer" default="${OCF_RESKEY_lvs_ipv6_addrlabel_value_default}"/>
+</parameter>
+
+<parameter name="mac">
+<longdesc lang="en">
+Set the interface MAC address explicitly. Currently only used in case of
+the Cluster IP Alias. Leave empty to choose automatically.
+
+</longdesc>
+<shortdesc lang="en">Cluster IP MAC address</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="clusterip_hash">
+<longdesc lang="en">
+Specify the hashing algorithm used for the Cluster IP functionality.
+
+</longdesc>
+<shortdesc lang="en">Cluster IP hashing function</shortdesc>
+<content type="string" default="${OCF_RESKEY_clusterip_hash_default}"/>
+</parameter>
+
+<parameter name="unique_clone_address">
+<longdesc lang="en">
+If true, add the clone ID to the supplied value of IP to create
+a unique address to manage
+</longdesc>
+<shortdesc lang="en">Create a unique address for cloned instances</shortdesc>
+<content type="boolean" default="${OCF_RESKEY_unique_clone_address_default}"/>
+</parameter>
+
+<parameter name="arp_interval">
+<longdesc lang="en">
+Specify the interval between unsolicited ARP packets in milliseconds.
+</longdesc>
+<shortdesc lang="en">ARP packet interval in ms</shortdesc>
+<content type="integer" default="${OCF_RESKEY_arp_interval_default}"/>
+</parameter>
+
+<parameter name="arp_count">
+<longdesc lang="en">
+Number of unsolicited ARP packets to send.
+</longdesc>
+<shortdesc lang="en">ARP packet count</shortdesc>
+<content type="integer" default="${OCF_RESKEY_arp_count_default}"/>
+</parameter>
+
+<parameter name="arp_bg">
+<longdesc lang="en">
+Whether or not to send the ARP packets in the background.
+</longdesc>
+<shortdesc lang="en">ARP from background</shortdesc>
+<content type="string" default="${OCF_RESKEY_arp_bg_default}"/>
+</parameter>
+
+<parameter name="arp_mac">
+<longdesc lang="en">
+MAC address to send the ARP packets to.
+
+You really shouldn't be touching this.
+
+</longdesc>
+<shortdesc lang="en">ARP MAC</shortdesc>
+<content type="string" default="${OCF_RESKEY_arp_mac_default}"/>
+</parameter>
+
+<parameter name="arp_sender">
+<longdesc lang="en">
+The program to send ARP packets with on start. For infiniband
+interfaces, default is ipoibarping. If ipoibarping is not
+available, set this to send_arp.
+</longdesc>
+<shortdesc lang="en">ARP sender</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="flush_routes">
+<longdesc lang="en">
+Flush the routing table on stop. This is for
+applications which use the cluster IP address
+and which run on the same physical host that the
+IP address lives on. The Linux kernel may force that
+application to take a shortcut to the local loopback
+interface, instead of the interface the address
+is really bound to. Under those circumstances, an
+application may, somewhat unexpectedly, continue
+to use connections for some time even after the
+IP address is deconfigured. Set this parameter in
+order to immediately disable said shortcut when the
+IP address goes away.
+</longdesc>
+<shortdesc lang="en">Flush kernel routing table on stop</shortdesc>
+<content type="boolean" default="false"/>
+</parameter>
+
+</parameters>
+<actions>
+<action name="start" timeout="20s" />
+<action name="stop" timeout="20s" />
+<action name="status" depth="0" timeout="20s" interval="10s" />
+<action name="monitor" depth="0" timeout="20s" interval="10s" />
+<action name="meta-data" timeout="5s" />
+<action name="validate-all" timeout="20s" />
+</actions>
+</resource-agent>
+'''
+
+_saved_get_ra = ra.get_ra
+_saved_cluster_nodes = utils.list_cluster_nodes
+
+
+def setup_function():
+ "hijack ra.get_ra to add new resource class (of sorts)"
+ class Agent(object):
+ def __init__(self, name):
+ self.name = name
+
+ def meta(self):
+ if self.name == 'apache':
+ return etree.fromstring(_apache)
+ else:
+ return etree.fromstring(_virtual_ip)
+
+ def _get_ra(agent):
+ if agent.startswith('test:'):
+ return Agent(agent[5:])
+ return _saved_get_ra(agent)
+ ra.get_ra = _get_ra
+
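+ # Pretend to be part of a four-node cluster so that scripts which iterate
+ # over the cluster nodes have something to work with.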
+ utils.list_cluster_nodes = lambda: [utils.this_node(), 'a', 'b', 'c']
+
+
+def teardown_function():
+ ra.get_ra = _saved_get_ra
+ utils.list_cluster_nodes = _saved_cluster_nodes
+
+
+def test_list():
+ assert set(['v2', 'legacy', '10-webserver', 'inc1', 'inc2', 'vip', 'vipinc', 'unified']) == set(s for s in scripts.list_scripts())
+
+
+def test_load_legacy():
+ script = scripts.load_script('legacy')
+ assert script is not None
+ assert 'legacy' == script['name']
+ assert len(script['shortdesc']) > 0
+ pprint(script)
+ actions = scripts.verify(script, {}, external_check=False)
+ pprint(actions)
+ assert [{'longdesc': '',
+ 'name': 'apply_local',
+ 'shortdesc': 'Configure SSH',
+ 'text': '',
+ 'value': 'configure.py ssh'},
+ {'longdesc': '',
+ 'name': 'collect',
+ 'shortdesc': 'Check state of nodes',
+ 'text': '',
+ 'value': 'collect.py'},
+ {'longdesc': '',
+ 'name': 'validate',
+ 'shortdesc': 'Verify parameters',
+ 'text': '',
+ 'value': 'verify.py'},
+ {'longdesc': '',
+ 'name': 'apply',
+ 'shortdesc': 'Install packages',
+ 'text': '',
+ 'value': 'configure.py install'},
+ {'longdesc': '',
+ 'name': 'apply_local',
+ 'shortdesc': 'Generate corosync authkey',
+ 'text': '',
+ 'value': 'authkey.py'},
+ {'longdesc': '',
+ 'name': 'apply',
+ 'shortdesc': 'Configure cluster nodes',
+ 'text': '',
+ 'value': 'configure.py corosync'},
+ {'longdesc': '',
+ 'name': 'apply_local',
+ 'shortdesc': 'Initialize cluster',
+ 'text': '',
+ 'value': 'init.py'}] == actions
+
+
+def test_load_workflow():
+ script = scripts.load_script('10-webserver')
+ assert script is not None
+ assert '10-webserver' == script['name']
+ assert len(script['shortdesc']) > 0
+
+
+def test_v2():
+ script = scripts.load_script('v2')
+ assert script is not None
+ assert 'v2' == script['name']
+ assert len(script['shortdesc']) > 0
+
+ actions = scripts.verify(
+ script,
+ {'id': 'www',
+ 'apache': {'id': 'apache'},
+ 'virtual-ip': {'id': 'www-vip', 'ip': '192.168.1.100'},
+ 'install': False}, external_check=False)
+ pprint(actions)
+ assert len(actions) == 1
+ assert str(actions[0]['text']).find('group www') >= 0
+
+ actions = scripts.verify(
+ script,
+ {'id': 'www',
+ 'apache': {'id': 'apache'},
+ 'virtual-ip': {'id': 'www-vip', 'ip': '192.168.1.100'},
+ 'install': True}, external_check=False)
+ pprint(actions)
+ assert len(actions) == 3
+
+
+def test_agent_include():
+ inc2 = scripts.load_script('inc2')
+ actions = scripts.verify(
+ inc2,
+ {'wiz': 'abc',
+ 'foo': 'cde',
+ 'included-script': {'foo': True, 'bar': 'bah bah'}}, external_check=False)
+ pprint(actions)
+ assert len(actions) == 6
+ assert '33\n\nabc' == actions[-1]['text'].strip()
+
+
+def test_vipinc():
+ script = scripts.load_script('vipinc')
+ assert script is not None
+ actions = scripts.verify(
+ script,
+ {'vip': {'id': 'vop', 'ip': '10.0.0.4'}}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'].find('primitive vop test:virtual-ip\n\tip="10.0.0.4"') >= 0
+ assert actions[0]['text'].find("clone c-vop vop") >= 0
+
+
+def test_value_replace_handles():
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ value: bar
+'''
+ b = '''---
+- version: 2.2
+ category: Script
+ include:
+ - script: test-a
+ parameters:
+ - name: foo
+ value: "{{wiz}}+{{wiz}}"
+ parameters:
+ - name: wiz
+ required: true
+ actions:
+ - cib: "{{test-a:foo}}"
+'''
+
+ script_a = scripts.load_script_string('test-a', a)
+ script_b = scripts.load_script_string('test-b', b)
+ assert script_a is not None
+ assert script_b is not None
+ actions = scripts.verify(script_b,
+ {'wiz': "SARUMAN"}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "SARUMAN+SARUMAN"
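+ # Two-stage substitution: {{wiz}} is resolved inside the include's "foo"
+ # value first, and the outer action then reads the result back through the
+ # namespaced {{test-a:foo}} handle.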
+
+
+def test_optional_step_ref():
+ """
+ Exercise referencing ids from substeps, which has been a source of bugs.
+ """
+ a = '''---
+- version: 2.2
+ category: Script
+ include:
+ - agent: test:apache
+ name: apache
+ parameters:
+ - name: id
+ required: true
+'''
+ b = '''---
+- version: 2.2
+ category: Script
+ include:
+ - script: apache
+ required: false
+ parameters:
+ - name: wiz
+ required: true
+ actions:
+ - cib: "primitive {{wiz}} {{apache:id}}"
+'''
+
+ script_a = scripts.load_script_string('apache', a)
+ script_b = scripts.load_script_string('test-b', b)
+ assert script_a is not None
+ assert script_b is not None
+
+ actions = scripts.verify(script_a,
+ {"id": "apacho"}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "primitive apacho test:apache"
+
+ actions = scripts.verify(script_b,
+ {'wiz': "SARUMAN", "apache": {"id": "apacho"}}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "primitive SARUMAN apacho"
+
+
+def test_enums_basic():
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ required: true
+ type: enum
+ values:
+ - one
+ - two
+ - three
+ actions:
+ - cib: "{{foo}}"
+'''
+
+ script_a = scripts.load_script_string('test-a', a)
+ assert script_a is not None
+
+ actions = scripts.verify(script_a,
+ {"foo": "one"}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "one"
+
+ actions = scripts.verify(script_a,
+ {"foo": "three"}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "three"
+
+
+def test_enums_fail():
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ required: true
+ type: enum
+ values:
+ - one
+ - two
+ - three
+ actions:
+ - cib: "{{foo}}"
+'''
+ script_a = scripts.load_script_string('test-a', a)
+ assert script_a is not None
+
+ def ver():
+ return scripts.verify(script_a, {"foo": "wrong"}, external_check=False)
+ with pytest.raises(ValueError):
+ ver()
+
+
+def test_enums_fail2():
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ required: true
+ type: enum
+ actions:
+ - cib: "{{foo}}"
+'''
+ script_a = scripts.load_script_string('test-a', a)
+ assert script_a is not None
+
+ def ver():
+ return scripts.verify(script_a, {"foo": "one"}, external_check=False)
+ with pytest.raises(ValueError):
+ ver()
+
+
+def test_two_substeps():
+ """
+ Exercise a scoping issue seen when the same script is included twice.
+ """
+ a = '''---
+- version: 2.2
+ category: Script
+ include:
+ - agent: test:apache
+ name: apache
+ parameters:
+ - name: id
+ required: true
+'''
+ b = '''---
+- version: 2.2
+ category: Script
+ include:
+ - script: apache
+ name: apache-a
+ required: true
+ - script: apache
+ name: apache-b
+ required: true
+ parameters:
+ - name: wiz
+ required: true
+ actions:
+ - include: apache-a
+ - include: apache-b
+ - cib: "primitive {{wiz}} {{apache-a:id}} {{apache-b:id}}"
+'''
+
+ script_a = scripts.load_script_string('apache', a)
+ script_b = scripts.load_script_string('test-b', b)
+ assert script_a is not None
+ assert script_b is not None
+
+ actions = scripts.verify(script_b,
+ {'wiz': "head", "apache-a": {"id": "one"}, "apache-b": {"id": "two"}}, external_check=False)
+ assert len(actions) == 1
+ pprint(actions)
+ assert actions[0]['text'] == "primitive one test:apache\n\nprimitive two test:apache\n\nprimitive head one two"
+
+
+def test_required_subscript_params():
+ """
+ If an optional subscript has multiple required parameters,
+ excluding all = ok
+ excluding one = fail
+ """
+
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ required: true
+ type: string
+ - name: bar
+ required: true
+ type: string
+ actions:
+ - cib: "{{foo}} {{bar}}"
+'''
+
+ b = '''---
+- version: 2.2
+ category: Script
+ include:
+ - script: foofoo
+ required: false
+ actions:
+ - include: foofoo
+ - cib: "{{foofoo:foo}} {{foofoo:bar}}"
+'''
+
+ script_a = scripts.load_script_string('foofoo', a)
+ script_b = scripts.load_script_string('test-b', b)
+ assert script_a is not None
+ assert script_b is not None
+
+ def ver():
+ actions = scripts.verify(script_b,
+ {"foofoo": {"foo": "one"}}, external_check=False)
+ pprint(actions)
+ with pytest.raises(ValueError):
+ ver()
+
+
+def test_unified():
+ unified = scripts.load_script('unified')
+ actions = scripts.verify(
+ unified,
+ {'id': 'foo',
+ 'vip': {'id': 'bar', 'ip': '192.168.0.15'}}, external_check=False)
+ pprint(actions)
+ assert len(actions) == 1
+ assert 'primitive bar IPaddr2 ip=192.168.0.15\ngroup g-foo foo bar' == actions[-1]['text'].strip()
+
+
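+# Minimal stand-in for the printer object that scripts.run() drives: every
+# callback invocation is recorded in .actions for later inspection.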
+class TestPrinter(object):
+ def __init__(self):
+ import types
+ self.actions = []
+
+ def add_capture(name):
+ def capture(obj, *args):
+ obj.actions.append((name, args))
+ self.__dict__[name] = types.MethodType(capture, self)
+ for name in ('print_header', 'debug', 'error', 'start', 'flush', 'print_command', 'finish'):
+ add_capture(name)
+
+
+def test_inline_script():
+ """
+ Test inline script feature for call actions
+ """
+
+ a = '''---
+- version: 2.2
+ category: Script
+ parameters:
+ - name: foo
+ required: true
+ type: string
+ actions:
+ - call: |
+ #!/bin/sh
+ echo "{{foo}}"
+ nodes: local
+'''
+
+ script_a = scripts.load_script_string('foofoo', a)
+ assert script_a is not None
+
+ actions = scripts.verify(script_a,
+ {"foo": "hello world"}, external_check=False)
+ pprint(actions)
+ assert len(actions) == 1
+ assert actions[0]['name'] == 'call'
+ assert actions[0]['value'] == '#!/bin/sh\necho "hello world"'
+ tp = TestPrinter()
+ scripts.run(script_a,
+ {"foo": "hello world"}, tp)
+
+ for action, args in tp.actions:
+ print(action, args)
+ if action == 'finish':
+ assert args[0]['value'] == '#!/bin/sh\necho "hello world"'
+
+
+def test_when_expression():
+ """
+ Test when expressions
+ """
+ def runtest(when, val):
+ the_script = '''version: 2.2
+shortdesc: Test when expressions
+longdesc: See if more complicated expressions work
+parameters:
+ - name: stringtest
+ type: string
+ shortdesc: A test string
+actions:
+ - call: "echo '{{stringtest}}'"
+ when: %s
+'''
+ scrpt = scripts.load_script_string('{}_{}'.format(when, val), the_script % when)
+ assert scrpt is not None
+
+ a1 = scripts.verify(scrpt,
+ {"stringtest": val},
+ external_check=False)
+ pprint(a1)
+ return a1
+
+ a1 = runtest('stringtest == "balloon"', "balloon")
+ assert len(a1) == 1 and a1[0]['value'] == "echo 'balloon'"
+
+ a1 = runtest('stringtest == "balloon"', "not a balloon")
+ assert len(a1) == 0
+
+ a1 = runtest('stringtest != "balloon"', "not a balloon")
+ assert len(a1) == 1
+
+ a1 = runtest('stringtest != "balloon"', "balloon")
+ assert len(a1) == 0
+
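+ # "{{dry_run}}" refers to the built-in dry_run parameter, which appears to
+ # default to "no" (this case relies on that default).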
+ a1 = runtest('stringtest == "{{dry_run}}"', "no")
+ assert len(a1) == 1
+
+ a1 = runtest('stringtest == "yes" or stringtest == "no"', "yes")
+ assert len(a1) == 1
diff --git a/test/unittests/test_service_manager.py b/test/unittests/test_service_manager.py
new file mode 100644
index 0000000..082fc3c
--- /dev/null
+++ b/test/unittests/test_service_manager.py
@@ -0,0 +1,84 @@
+import unittest
+from unittest import mock
+
+import crmsh.sh
+from crmsh.service_manager import ServiceManager
+
+
+@mock.patch("crmsh.service_manager.ServiceManager._call_with_parallax")
+class TestServiceManager(unittest.TestCase):
+ """
+ Unitary tests for class ServiceManager
+ """
+
+ def setUp(self) -> None:
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._run_on_single_host = mock.Mock(self.service_manager._run_on_single_host)
+ self.service_manager._call = mock.Mock(self.service_manager._call)
+
+ def test_call_single_node(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._run_on_single_host = mock.Mock(self.service_manager._run_on_single_host)
+ self.service_manager._run_on_single_host.return_value = 0
+ self.assertEqual(['node1'], self.service_manager._call('node1', list(), 'foo'))
+ self.service_manager._run_on_single_host.assert_called_once_with('foo', 'node1')
+ mock_call_with_parallax.assert_not_called()
+
+ def test_call_single_node_failure(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._run_on_single_host = mock.Mock(self.service_manager._run_on_single_host)
+ self.service_manager._run_on_single_host.return_value = 1
+ self.assertEqual(list(), self.service_manager._call('node1', list(), 'foo'))
+ self.service_manager._run_on_single_host.assert_called_once_with('foo', 'node1')
+ mock_call_with_parallax.assert_not_called()
+
+ def test_call_multiple_node(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._run_on_single_host = mock.Mock(self.service_manager._run_on_single_host)
+ mock_call_with_parallax.return_value = {'node1': (0, '', ''), 'node2': (1, 'out', 'err')}
+ self.assertEqual(['node1'], self.service_manager._call(None, ['node1', 'node2'], 'foo'))
+ self.service_manager._run_on_single_host.assert_not_called()
+ mock_call_with_parallax.assert_called_once_with('foo', ['node1', 'node2'])
+
+ def test_run_on_single_host_return_1(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._shell.get_rc_stdout_stderr_without_input.return_value = (1, 'bar', 'err')
+ self.assertEqual(1, self.service_manager._run_on_single_host('foo', 'node1'))
+ self.service_manager._shell.get_rc_stdout_stderr_without_input.assert_called_once_with('node1', 'foo')
+
+ def test_run_on_single_host_return_255(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager = ServiceManager(mock.Mock(crmsh.sh.ClusterShell))
+ self.service_manager._shell.get_rc_stdout_stderr_without_input.return_value = (255, 'bar', 'err')
+ with self.assertRaises(ValueError):
+ self.service_manager._run_on_single_host('foo', 'node1')
+ self.service_manager._shell.get_rc_stdout_stderr_without_input.assert_called_once_with('node1', 'foo')
+
+ def test_start_service(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1']
+ self.assertEqual(['node1'], self.service_manager.start_service('service1', remote_addr='node1'))
+ self.service_manager._call.assert_called_once_with('node1', [], "systemctl start 'service1'")
+
+ def test_start_service_on_multiple_host(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1', 'node2']
+ self.assertEqual(['node1', 'node2'], self.service_manager.start_service('service1', node_list=['node1', 'node2']))
+ self.service_manager._call.assert_called_once_with(None, ['node1', 'node2'], "systemctl start 'service1'")
+
+ def test_start_and_enable_service(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1']
+ self.assertEqual(['node1'], self.service_manager.start_service('service1', enable=True, remote_addr='node1'))
+ self.service_manager._call.assert_called_once_with('node1', [], "systemctl enable --now 'service1'")
+
+ def test_stop_service(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1']
+ self.assertEqual(['node1'], self.service_manager.stop_service('service1', remote_addr='node1'))
+ self.service_manager._call.assert_called_once_with('node1', [], "systemctl stop 'service1'")
+
+ def test_enable_service(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1']
+ self.assertEqual(['node1'], self.service_manager.enable_service('service1', remote_addr='node1'))
+ self.service_manager._call.assert_called_once_with('node1', [], "systemctl enable 'service1'")
+
+ def test_disable_service(self, mock_call_with_parallax: mock.MagicMock):
+ self.service_manager._call.return_value = ['node1']
+ self.assertEqual(['node1'], self.service_manager.disable_service('service1', remote_addr='node1'))
+ self.service_manager._call.assert_called_once_with('node1', [], "systemctl disable 'service1'")
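# The _call tests above fix a dispatch rule: a single remote_addr goes
# through _run_on_single_host, a node_list fans out via parallax, and the
# return value is the list of hosts where the command succeeded (rc == 0).
# A hypothetical condensation of that contract (not the crmsh.service_manager
# source; the callables stand in for the mocked collaborators):
def _call_sketch(run_on_single_host, call_with_parallax, remote_addr, node_list, cmd):
    if remote_addr is not None:
        # rc == 0 means success; a failure yields an empty host list
        return [remote_addr] if run_on_single_host(cmd, remote_addr) == 0 else []
    results = call_with_parallax(cmd, node_list)  # {host: (rc, stdout, stderr)}
    return [host for host, (rc, _out, _err) in results.items() if rc == 0]

assert _call_sketch(lambda c, h: 0, None, 'node1', [], 'foo') == ['node1']
assert _call_sketch(None,
                    lambda c, hs: {'node1': (0, '', ''), 'node2': (1, 'out', 'err')},
                    None, ['node1', 'node2'], 'foo') == ['node1']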
diff --git a/test/unittests/test_sh.py b/test/unittests/test_sh.py
new file mode 100644
index 0000000..b3c0f0b
--- /dev/null
+++ b/test/unittests/test_sh.py
@@ -0,0 +1,189 @@
+import subprocess
+import unittest
+from unittest import mock
+
+import crmsh.sh
+from crmsh.user_of_host import UserOfHost
+
+
+class TestLocalShell(unittest.TestCase):
+ def setUp(self) -> None:
+ self.local_shell = crmsh.sh.LocalShell()
+ self.local_shell.get_effective_user_name = mock.Mock(self.local_shell.get_effective_user_name)
+ self.local_shell.geteuid = mock.Mock(self.local_shell.geteuid)
+ self.local_shell.hostname = mock.Mock(self.local_shell.hostname)
+
+ @mock.patch('subprocess.run')
+ def test_su_subprocess_run(self, mock_run: mock.MagicMock):
+ self.local_shell.get_effective_user_name.return_value = 'root'
+ self.local_shell.geteuid.return_value = 0
+ self.local_shell.su_subprocess_run(
+ 'alice', 'foo',
+ input=b'bar',
+ )
+ mock_run.assert_called_once_with(
+ ['su', 'alice', '--login', '-s', '/bin/sh', '-c', 'foo'],
+ input=b'bar',
+ )
+
+ @mock.patch('subprocess.run')
+ def test_su_subprocess_run_as_root(self, mock_run: mock.MagicMock):
+ self.local_shell.get_effective_user_name.return_value = 'root'
+ self.local_shell.geteuid.return_value = 0
+ self.local_shell.su_subprocess_run(
+ 'root', 'foo',
+ input=b'bar',
+ )
+ mock_run.assert_called_once_with(
+ ['/bin/sh', '-c', 'foo'],
+ input=b'bar',
+ )
+
+ @mock.patch('subprocess.run')
+ def test_su_subprocess_run_unauthorized(self, mock_run: mock.MagicMock):
+ self.local_shell.get_effective_user_name.return_value = 'bob'
+ self.local_shell.geteuid.return_value = 1001
+ with self.assertRaises(crmsh.sh.AuthorizationError) as ctx:
+ self.local_shell.su_subprocess_run(
+ 'root', 'foo',
+ input=b'bar',
+ )
+ self.assertIsInstance(ctx.exception, ValueError)
+
+ def test_get_stdout_stderr_decoded_and_stripped(self):
+ self.local_shell.get_rc_stdout_stderr_raw = mock.Mock(self.local_shell.get_rc_stdout_stderr_raw)
+ self.local_shell.get_rc_stdout_stderr_raw.return_value = 1, b' out \n', b'\terr\t\n'
+ rc, out, err = self.local_shell.get_rc_stdout_stderr('alice', 'foo', 'input')
+ self.assertEqual(1, rc)
+ self.assertEqual('out', out)
+ self.assertEqual('err', err)
+ self.local_shell.get_rc_stdout_stderr_raw.assert_called_once_with(
+ 'alice', 'foo', b'input',
+ )
+
+ def test_get_stdout_or_raise_error(self):
+ self.local_shell.su_subprocess_run = mock.Mock(self.local_shell.su_subprocess_run)
+ self.local_shell.su_subprocess_run.return_value = subprocess.CompletedProcess(
+ args=mock.Mock(),
+ returncode=1,
+ stdout=b'foo',
+ stderr=b' bar ',
+ )
+ with self.assertRaises(crmsh.sh.CommandFailure) as ctx:
+ self.local_shell.get_stdout_or_raise_error('root', 'foo')
+ self.assertIsInstance(ctx.exception, ValueError)
+
+
+class TestSSHShell(unittest.TestCase):
+ def setUp(self) -> None:
+ self.ssh_shell = crmsh.sh.SSHShell(mock.Mock(crmsh.sh.LocalShell), 'alice')
+ self.ssh_shell.local_shell.hostname.return_value = 'node1'
+ self.ssh_shell.local_shell.get_effective_user_name.return_value = 'root'
+ self.ssh_shell.local_shell.can_run_as.return_value = True
+
+ def test_can_run_as(self):
+ self.ssh_shell.local_shell.get_rc_and_error.return_value = 255, 'bar'
+ self.assertFalse(self.ssh_shell.can_run_as('node2', 'root'))
+ self.ssh_shell.local_shell.can_run_as.assert_not_called()
+
+ def test_can_run_as_local(self):
+ self.assertTrue(self.ssh_shell.can_run_as(None, 'root'))
+ self.ssh_shell.local_shell.can_run_as.assert_called_once_with('root')
+
+ def test_subprocess_run_without_input(self):
+ self.ssh_shell.subprocess_run_without_input(
+ 'node2', 'bob',
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ args, kwargs = self.ssh_shell.local_shell.su_subprocess_run.call_args
+ self.assertEqual('alice', args[0])
+ self.assertIn('bob@node2', args[1])
+ self.assertEqual(b'foo', kwargs['input'])
+ self.assertEqual(subprocess.PIPE, kwargs['stdout'])
+ self.assertEqual(subprocess.PIPE, kwargs['stderr'])
+
+ def test_subprocess_run_without_input_with_input_kwargs(self):
+ with self.assertRaises(AssertionError):
+ self.ssh_shell.subprocess_run_without_input(
+ 'node2', 'bob',
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ input=b'bar'
+ )
+ self.ssh_shell.local_shell.su_subprocess_run.assert_not_called()
+ with self.assertRaises(AssertionError):
+ self.ssh_shell.subprocess_run_without_input(
+ 'node2', 'bob',
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ )
+ self.ssh_shell.local_shell.su_subprocess_run.assert_not_called()
+
+ @mock.patch('subprocess.run')
+ def test_subprocess_run_without_input_local(self, mock_run):
+ self.ssh_shell.subprocess_run_without_input(
+ 'node1', 'bob',
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ self.ssh_shell.local_shell.su_subprocess_run.assert_not_called()
+ mock_run.assert_called_once_with(
+ ['sudo', '-H', '-u', 'bob', '/bin/sh'],
+ input=b'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+
+
+class TestClusterShell(unittest.TestCase):
+ def setUp(self) -> None:
+ self.cluster_shell = crmsh.sh.ClusterShell(mock.Mock(crmsh.sh.LocalShell), mock.Mock(UserOfHost))
+ self.cluster_shell.local_shell.hostname.return_value = 'node1'
+ self.cluster_shell.local_shell.get_effective_user_name.return_value = 'root'
+ self.cluster_shell.local_shell.can_run_as.return_value = True
+ self.cluster_shell.user_of_host.user_pair_for_ssh.return_value = ('alice', 'bob')
+
+ def test_subprocess_run_without_input(self):
+ self.cluster_shell.subprocess_run_without_input(
+ 'node2',
+ None,
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ self.cluster_shell.user_of_host.user_pair_for_ssh.assert_called_once_with('node2')
+ args, kwargs = self.cluster_shell.local_shell.su_subprocess_run.call_args
+ self.assertEqual('alice', args[0])
+ self.assertIn('bob@node2', args[1])
+ self.assertIn('-u root', args[1])
+ self.assertEqual(b'foo', kwargs['input'])
+ self.assertEqual(subprocess.PIPE, kwargs['stdout'])
+ self.assertEqual(subprocess.PIPE, kwargs['stderr'])
+
+ def test_subprocess_run_without_input_with_input_kwargs(self):
+ with self.assertRaises(AssertionError):
+ self.cluster_shell.subprocess_run_without_input(
+ 'node2',
+ None,
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ input=b'bar',
+ )
+ self.cluster_shell.local_shell.su_subprocess_run.assert_not_called()
+ with self.assertRaises(AssertionError):
+ self.cluster_shell.subprocess_run_without_input(
+ 'node2',
+ None,
+ 'foo',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ )
+ self.cluster_shell.local_shell.su_subprocess_run.assert_not_called()
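# The LocalShell tests assert on the exact argv handed to subprocess.run.
# The shape they imply, reconstructed from the assertions rather than copied
# from crmsh.sh, is: an effective root user runs its own commands directly,
# wraps commands for other users in `su --login`, and a non-root caller
# cannot switch user at all (crmsh.sh.AuthorizationError, a ValueError
# subclass, in the real code):
def su_args_sketch(euid, target_user, cmd):
    if euid != 0:
        raise ValueError('cannot run as %s without root privileges' % target_user)
    if target_user == 'root':
        return ['/bin/sh', '-c', cmd]
    return ['su', target_user, '--login', '-s', '/bin/sh', '-c', cmd]

assert su_args_sketch(0, 'root', 'foo') == ['/bin/sh', '-c', 'foo']
assert su_args_sketch(0, 'alice', 'foo') == ['su', 'alice', '--login', '-s', '/bin/sh', '-c', 'foo']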
diff --git a/test/unittests/test_time.py b/test/unittests/test_time.py
new file mode 100644
index 0000000..b252a5d
--- /dev/null
+++ b/test/unittests/test_time.py
@@ -0,0 +1,24 @@
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+
+from crmsh import utils
+from crmsh import logtime
+import time
+import datetime
+import dateutil.tz
+
+
+def test_time_convert1():
+ loctz = dateutil.tz.tzlocal()
+ tm = time.localtime(utils.datetime_to_timestamp(utils.make_datetime_naive(datetime.datetime(2015, 6, 1, 10, 0, 0).replace(tzinfo=loctz))))
+ dt = utils.parse_time('Jun 01, 2015 10:00:00')
+ assert logtime.human_date(dt) == time.strftime('%Y-%m-%d %H:%M:%S', tm)
+
+
+def test_time_convert2():
+ loctz = dateutil.tz.tzlocal()
+ tm = time.localtime(utils.datetime_to_timestamp(utils.make_datetime_naive(datetime.datetime(2015, 6, 1, 10, 0, 0).replace(tzinfo=loctz))))
+ ts = time.localtime(utils.parse_to_timestamp('Jun 01, 2015 10:00:00'))
+ assert time.strftime('%Y-%m-%d %H:%M:%S', ts) == time.strftime('%Y-%m-%d %H:%M:%S', tm)
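# Both tests above check one invariant from two directions: parsing a
# wall-clock string and building a tz-aware datetime directly must land on
# the same local timestamp. A self-contained restatement using only
# stdlib/dateutil (not the crmsh.utils helpers):
import datetime
import dateutil.tz
import dateutil.parser

local = dateutil.tz.tzlocal()
a = datetime.datetime(2015, 6, 1, 10, 0, 0, tzinfo=local).timestamp()
b = dateutil.parser.parse('Jun 01, 2015 10:00:00').replace(tzinfo=local).timestamp()
assert a == b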
diff --git a/test/unittests/test_ui_cluster.py b/test/unittests/test_ui_cluster.py
new file mode 100644
index 0000000..a86936f
--- /dev/null
+++ b/test/unittests/test_ui_cluster.py
@@ -0,0 +1,173 @@
+import logging
+import unittest
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import ui_cluster
+
+logging.basicConfig(level=logging.INFO)
+
+class TestCluster(unittest.TestCase):
+ """
+    Unitary tests for class ui_cluster.Cluster
+ """
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.ui_cluster_inst = ui_cluster.Cluster()
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ def test_do_start_already_started(self, mock_qdevice_configured, mock_parse_nodes, mock_active, mock_info):
+ mock_qdevice_configured.return_value = False
+ context_inst = mock.Mock()
+ mock_parse_nodes.return_value = ["node1", "node2"]
+ mock_active.side_effect = [True, True]
+ self.ui_cluster_inst.do_start(context_inst, "node1", "node2")
+ mock_parse_nodes.assert_called_once_with(context_inst, "node1", "node2")
+ mock_active.assert_has_calls([
+ mock.call("pacemaker.service", remote_addr="node1"),
+ mock.call("pacemaker.service", remote_addr="node2")
+ ])
+ mock_info.assert_has_calls([
+ mock.call("The cluster stack already started on node1"),
+ mock.call("The cluster stack already started on node2")
+ ])
+
+ @mock.patch('crmsh.qdevice.QDevice.check_qdevice_vote')
+ @mock.patch('crmsh.bootstrap.start_pacemaker')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.utils.is_qdevice_configured')
+ @mock.patch('crmsh.service_manager.ServiceManager.start_service')
+ @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+ @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
+ def test_do_start(self, mock_parse_nodes, mock_active, mock_start, mock_qdevice_configured, mock_info, mock_start_pacemaker, mock_check_qdevice):
+ context_inst = mock.Mock()
+ mock_start_pacemaker.return_value = ["node1"]
+ mock_parse_nodes.return_value = ["node1"]
+ mock_active.side_effect = [False, False]
+ mock_qdevice_configured.return_value = True
+
+ self.ui_cluster_inst.do_start(context_inst, "node1")
+
+ mock_active.assert_has_calls([
+ mock.call("pacemaker.service", remote_addr="node1"),
+ mock.call("corosync-qdevice.service", remote_addr="node1")
+ ])
+ mock_start.assert_called_once_with("corosync-qdevice", node_list=["node1"])
+ mock_qdevice_configured.assert_called_once_with()
+ mock_info.assert_called_once_with("The cluster stack started on node1")
+
+ @mock.patch('crmsh.ui_cluster.Cluster._wait_for_dc')
+ @mock.patch('crmsh.ui_cluster.Cluster._node_ready_to_stop_cluster_service')
+ @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
+ def test_do_stop_return(self, mock_parse_nodes, mock_node_ready_to_stop_cluster_service, mock_dc):
+ mock_parse_nodes.return_value = ["node1", "node2"]
+ mock_node_ready_to_stop_cluster_service.side_effect = [False, False]
+
+ context_inst = mock.Mock()
+ self.ui_cluster_inst.do_stop(context_inst, "node1", "node2")
+
+ mock_parse_nodes.assert_called_once_with(context_inst, "node1", "node2")
+ mock_node_ready_to_stop_cluster_service.assert_has_calls([mock.call("node1"), mock.call("node2")])
+ mock_dc.assert_not_called()
+
+ @mock.patch('logging.Logger.debug')
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.ui_cluster.ServiceManager')
+ @mock.patch('crmsh.ui_cluster.Cluster._set_dlm')
+ @mock.patch('crmsh.ui_cluster.Cluster._wait_for_dc')
+ @mock.patch('crmsh.ui_cluster.Cluster._node_ready_to_stop_cluster_service')
+ @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
+ def test_do_stop(self, mock_parse_nodes, mock_node_ready_to_stop_cluster_service, mock_dc,
+ mock_set_dlm, mock_service_manager, mock_info, mock_debug):
+ mock_parse_nodes.return_value = ["node1", "node2"]
+ mock_node_ready_to_stop_cluster_service.side_effect = [True, False]
+ mock_service_manager_inst = mock.Mock()
+ mock_service_manager.return_value = mock_service_manager_inst
+ mock_service_manager_inst.stop_service.side_effect = [["node1"], ["node1"], ["node1"]]
+ mock_service_manager_inst.service_is_active.return_value = True
+
+ context_inst = mock.Mock()
+ self.ui_cluster_inst.do_stop(context_inst, "node1", "node2")
+
+ mock_parse_nodes.assert_called_once_with(context_inst, "node1", "node2")
+ mock_node_ready_to_stop_cluster_service.assert_has_calls([mock.call("node1"), mock.call("node2")])
+ mock_debug.assert_called_once_with("stop node list: ['node1']")
+ mock_dc.assert_called_once_with("node1")
+ mock_set_dlm.assert_called_once_with("node1")
+ mock_service_manager_inst.stop_service.assert_has_calls([
+ mock.call("pacemaker", node_list=["node1"]),
+ mock.call("corosync-qdevice.service", node_list=["node1"]),
+ mock.call("corosync", node_list=["node1"]),
+ ])
+ mock_info.assert_called_once_with("The cluster stack stopped on node1")
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.ui_cluster.ServiceManager')
+ def test_node_ready_to_stop_cluster_service_corosync(self, mock_service_manager, mock_info):
+ mock_service_manager_inst = mock.Mock()
+ mock_service_manager.return_value = mock_service_manager_inst
+ mock_service_manager_inst.service_is_active.side_effect = [False, True, False]
+ res = self.ui_cluster_inst._node_ready_to_stop_cluster_service("node1")
+ assert res is False
+ mock_service_manager_inst.service_is_active.assert_has_calls([
+ mock.call("corosync.service", remote_addr="node1"),
+ mock.call("sbd.service", remote_addr="node1"),
+ mock.call("pacemaker.service", remote_addr="node1"),
+ ])
+ mock_service_manager_inst.stop_service.assert_called_once_with("corosync", remote_addr="node1")
+ mock_info.assert_called_once_with("The cluster stack stopped on node1")
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.ui_cluster.ServiceManager')
+ def test_node_ready_to_stop_cluster_service_pacemaker(self, mock_service_manager, mock_info):
+ mock_service_manager_inst = mock.Mock()
+ mock_service_manager.return_value = mock_service_manager_inst
+ mock_service_manager_inst.service_is_active.side_effect = [True, True, False]
+ res = self.ui_cluster_inst._node_ready_to_stop_cluster_service("node1")
+ assert res is False
+ mock_service_manager_inst.service_is_active.assert_has_calls([
+ mock.call("corosync.service", remote_addr="node1"),
+ mock.call("sbd.service", remote_addr="node1"),
+ mock.call("pacemaker.service", remote_addr="node1"),
+ ])
+ mock_service_manager_inst.stop_service.assert_called_once_with("corosync", remote_addr="node1")
+ mock_info.assert_called_once_with("The cluster stack stopped on node1")
+
+ @mock.patch('logging.Logger.info')
+ @mock.patch('crmsh.ui_cluster.ServiceManager')
+ def test_node_ready_to_stop_cluster_service(self, mock_service_manager, mock_info):
+ mock_service_manager_inst = mock.Mock()
+ mock_service_manager.return_value = mock_service_manager_inst
+ mock_service_manager_inst.service_is_active.side_effect = [True, True, True]
+ res = self.ui_cluster_inst._node_ready_to_stop_cluster_service("node1")
+ assert res is True
+ mock_service_manager_inst.service_is_active.assert_has_calls([
+ mock.call("corosync.service", remote_addr="node1"),
+ mock.call("sbd.service", remote_addr="node1"),
+ mock.call("pacemaker.service", remote_addr="node1"),
+ ])
+ mock_info.assert_not_called()
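# Taken together, the three _node_ready_to_stop_cluster_service tests above
# determine the decision logic. A sketch reconstructed from the mocked side
# effects (the real implementation lives in crmsh.ui_cluster; the sbd state
# is probed too, although these three cases do not make it decisive):
def node_ready_to_stop_sketch(service_manager, logger, node):
    corosync_active = service_manager.service_is_active("corosync.service", remote_addr=node)
    sbd_active = service_manager.service_is_active("sbd.service", remote_addr=node)  # probed, not decisive here
    pacemaker_active = service_manager.service_is_active("pacemaker.service", remote_addr=node)
    if not corosync_active or not pacemaker_active:
        # A half-started stack is torn down right away, and the node is
        # then skipped by do_stop.
        service_manager.stop_service("corosync", remote_addr=node)
        logger.info("The cluster stack stopped on %s" % node)
        return False
    return True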
diff --git a/test/unittests/test_upgradeuitl.py b/test/unittests/test_upgradeuitl.py
new file mode 100644
index 0000000..56c7284
--- /dev/null
+++ b/test/unittests/test_upgradeuitl.py
@@ -0,0 +1,54 @@
+import os
+import sys
+import unittest
+from unittest import mock
+
+from crmsh import upgradeutil
+
+
+class TestUpgradeCondition(unittest.TestCase):
+ @mock.patch('crmsh.upgradeutil._get_file_content')
+ @mock.patch('os.stat')
+ def test_is_upgrade_needed_by_force_upgrade(self, mock_stat: mock.MagicMock, mock_get_file_content):
+ mock_stat.return_value = mock.Mock(os.stat_result)
+ mock_get_file_content.return_value = b''
+ self.assertTrue(upgradeutil._is_upgrade_needed(['node-1', 'node-2']))
+
+ @mock.patch('crmsh.upgradeutil._get_file_content')
+ @mock.patch('os.stat')
+ def test_is_upgrade_needed_by_non_existent_seq(
+ self,
+ mock_stat: mock.MagicMock,
+ mock_get_file_content: mock.MagicMock,
+ ):
+ mock_stat.side_effect = FileNotFoundError()
+ mock_get_file_content.return_value = b''
+ self.assertTrue(upgradeutil._is_upgrade_needed(['node-1', 'node-2']))
+
+ @mock.patch('crmsh.upgradeutil.CURRENT_UPGRADE_SEQ')
+ @mock.patch('crmsh.upgradeutil._get_file_content')
+ @mock.patch('os.stat')
+ def test_is_upgrade_needed_by_seq_less_than_expected(
+ self,
+ mock_stat,
+ mock_get_file_content,
+ mock_current_upgrade_seq: mock.MagicMock,
+ ):
+ mock_stat.side_effect = FileNotFoundError()
+ mock_get_file_content.return_value = b'0.1\n'
+ mock_current_upgrade_seq.__gt__.return_value = True
+ self.assertTrue(upgradeutil._is_upgrade_needed(['node-1', 'node-2']))
+
+ @mock.patch('crmsh.upgradeutil.CURRENT_UPGRADE_SEQ')
+ @mock.patch('crmsh.upgradeutil._get_file_content')
+ @mock.patch('os.stat')
+ def test_is_upgrade_needed_by_seq_not_less_than_expected(
+ self,
+ mock_stat,
+ mock_get_file_content,
+ mock_current_upgrade_seq: mock.MagicMock,
+ ):
+ mock_stat.side_effect = FileNotFoundError()
+ mock_get_file_content.return_value = b'1.0\n'
+ mock_current_upgrade_seq.__gt__.return_value = False
+ self.assertFalse(upgradeutil._is_upgrade_needed(['node-1', 'node-2']))
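# The four tests above fix _is_upgrade_needed as a three-step check. A
# hypothetical condensation (argument names are illustrative; the real
# logic reads the force-upgrade marker and sequence file itself):
def is_upgrade_needed_sketch(force_marker_exists, recorded_seq, current_seq):
    if force_marker_exists:       # force-upgrade marker file present
        return True
    if not recorded_seq:          # no (or empty) recorded sequence number
        return True
    return current_seq > recorded_seq  # recorded sequence lags behind

assert is_upgrade_needed_sketch(True, None, (1, 0))
assert is_upgrade_needed_sketch(False, b'', (1, 0))
assert is_upgrade_needed_sketch(False, (0, 1), (1, 0))
assert not is_upgrade_needed_sketch(False, (1, 0), (1, 0))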
diff --git a/test/unittests/test_utils.py b/test/unittests/test_utils.py
new file mode 100644
index 0000000..bf06fbd
--- /dev/null
+++ b/test/unittests/test_utils.py
@@ -0,0 +1,1514 @@
+from __future__ import unicode_literals
+# Copyright (C) 2014 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+#
+# unit tests for utils.py
+
+import os
+import socket
+import re
+import imp
+import subprocess
+import unittest
+import pytest
+import logging
+from unittest import mock
+from itertools import chain
+
+import crmsh.utils
+from crmsh import utils, config, tmpfiles, constants, parallax
+
+logging.basicConfig(level=logging.DEBUG)
+
+def setup_function():
+ utils._ip_for_cloud = None
+ # Mock memoize method and reload the module under test later with imp
+ mock.patch('crmsh.utils.memoize', lambda x: x).start()
+ imp.reload(utils)
+
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_print_cluster_nodes(mock_run):
+ mock_run.return_value = (0, "data", None)
+ utils.print_cluster_nodes()
+ mock_run.assert_called_once_with("crm_node -l")
+
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout")
+def test_package_is_installed_local(mock_run):
+ mock_run.return_value = (0, None)
+ res = utils.package_is_installed("crmsh")
+ assert res is True
+ mock_run.assert_called_once_with("rpm -q --quiet crmsh")
+
+
+@mock.patch('crmsh.utils.detect_file')
+def test_check_file_content_included_target_not_exist(mock_detect):
+ mock_detect.side_effect = [True, False]
+ res = utils.check_file_content_included("file1", "file2")
+ assert res is False
+ mock_detect.assert_has_calls([
+ mock.call("file1", remote=None),
+ mock.call("file2", remote=None)
+ ])
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+@mock.patch('crmsh.utils.detect_file')
+def test_check_file_content_included(mock_detect, mock_run):
+ mock_detect.side_effect = [True, True]
+ mock_run.side_effect = ["data data", "data"]
+
+ res = utils.check_file_content_included("file1", "file2")
+ assert res is True
+
+ mock_detect.assert_has_calls([
+ mock.call("file1", remote=None),
+ mock.call("file2", remote=None)
+ ])
+ mock_run.assert_has_calls([
+ mock.call("cat file2", host=None),
+ mock.call("cat file1", host=None)
+ ])
+
+
+@mock.patch('re.search')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout')
+def test_get_nodeid_from_name_run_None1(mock_get_stdout, mock_re_search):
+ mock_get_stdout.return_value = (1, None)
+ mock_re_search_inst = mock.Mock()
+ mock_re_search.return_value = mock_re_search_inst
+ res = utils.get_nodeid_from_name("node1")
+ assert res is None
+ mock_get_stdout.assert_called_once_with('crm_node -l')
+ mock_re_search.assert_not_called()
+
+
+@mock.patch('re.search')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout')
+def test_get_nodeid_from_name_run_None2(mock_get_stdout, mock_re_search):
+ mock_get_stdout.return_value = (0, "172167901 node1 member\n172168231 node2 member")
+ mock_re_search.return_value = None
+ res = utils.get_nodeid_from_name("node111")
+ assert res is None
+ mock_get_stdout.assert_called_once_with('crm_node -l')
+ mock_re_search.assert_called_once_with(r'^([0-9]+) node111 ', mock_get_stdout.return_value[1], re.M)
+
+
+@mock.patch('re.search')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout')
+def test_get_nodeid_from_name(mock_get_stdout, mock_re_search):
+ mock_get_stdout.return_value = (0, "172167901 node1 member\n172168231 node2 member")
+ mock_re_search_inst = mock.Mock()
+ mock_re_search.return_value = mock_re_search_inst
+ mock_re_search_inst.group.return_value = '172168231'
+ res = utils.get_nodeid_from_name("node2")
+ assert res == '172168231'
+ mock_get_stdout.assert_called_once_with('crm_node -l')
+ mock_re_search.assert_called_once_with(r'^([0-9]+) node2 ', mock_get_stdout.return_value[1], re.M)
+ mock_re_search_inst.group.assert_called_once_with(1)
+
+
+@mock.patch('crmsh.sh.LocalShell.get_rc_and_error')
+def test_check_ssh_passwd_need(mock_run):
+ mock_run.return_value = (1, 'foo')
+ res = utils.check_ssh_passwd_need("bob", "alice", "node1")
+ assert res is True
+ mock_run.assert_called_once_with(
+ "bob",
+ " ssh -o StrictHostKeyChecking=no -o EscapeChar=none -o ConnectTimeout=15 -T -o Batchmode=yes alice@node1 true",
+ )
+
+
+@mock.patch('crmsh.utils.list_cluster_nodes')
+def test_cluster_run_cmd_exception(mock_list_nodes):
+ mock_list_nodes.return_value = None
+ with pytest.raises(ValueError) as err:
+ utils.cluster_run_cmd("test")
+ assert str(err.value) == "Failed to get node list from cluster"
+ mock_list_nodes.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.list_cluster_nodes')
+def test_list_cluster_nodes_except_me_exception(mock_list_nodes):
+ mock_list_nodes.return_value = None
+ with pytest.raises(ValueError) as err:
+ utils.list_cluster_nodes_except_me()
+ assert str(err.value) == "Failed to get node list from cluster"
+ mock_list_nodes.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.this_node')
+@mock.patch('crmsh.utils.list_cluster_nodes')
+def test_list_cluster_nodes_except_me(mock_list_nodes, mock_this_node):
+ mock_list_nodes.return_value = ["node1", "node2"]
+ mock_this_node.return_value = "node1"
+ res = utils.list_cluster_nodes_except_me()
+ assert res == ["node2"]
+ mock_list_nodes.assert_called_once_with()
+ mock_this_node.assert_called_once_with()
+
+
+def test_to_ascii():
+ assert utils.to_ascii(None) is None
+ assert utils.to_ascii('test') == 'test'
+ assert utils.to_ascii(b'test') == 'test'
+ # Test not utf-8 characters
+ with mock.patch('traceback.print_exc') as mock_traceback:
+ assert utils.to_ascii(b'te\xe9st') == 'test'
+ mock_traceback.assert_called_once_with()
+
+
+def test_systeminfo():
+ assert utils.getuser() is not None
+ assert utils.gethomedir() is not None
+ assert utils.get_tempdir() is not None
+
+
+def test_shadowcib():
+ assert utils.get_cib_in_use() == ""
+ utils.set_cib_in_use("foo")
+ assert utils.get_cib_in_use() == "foo"
+ utils.clear_cib_in_use()
+ assert utils.get_cib_in_use() == ""
+
+
+def test_booleans():
+ truthy = ['yes', 'Yes', 'True', 'true', 'TRUE',
+ 'YES', 'on', 'On', 'ON']
+ falsy = ['no', 'false', 'off', 'OFF', 'FALSE', 'nO']
+ not_truthy = ['', 'not', 'ONN', 'TRUETH', 'yess']
+ for case in chain(truthy, falsy):
+ assert utils.verify_boolean(case) is True
+ for case in truthy:
+ assert utils.is_boolean_true(case) is True
+ assert utils.is_boolean_false(case) is False
+ assert utils.get_boolean(case) is True
+ for case in falsy:
+ assert utils.is_boolean_true(case) is False
+ assert utils.is_boolean_false(case) is True
+ assert utils.get_boolean(case, dflt=True) is False
+ for case in not_truthy:
+ assert utils.verify_boolean(case) is False
+ assert utils.is_boolean_true(case) is False
+ assert utils.is_boolean_false(case) is False
+ assert utils.get_boolean(case) is False
+
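# The vocabulary test_booleans fixes, restated compactly (a sketch of what
# verify_boolean accepts, not the crmsh.utils source): yes/true/on in any
# case are true, no/false/off are false, anything else fails verification.
_TRUTHY, _FALSY = {'yes', 'true', 'on'}, {'no', 'false', 'off'}

def verify_boolean_sketch(s):
    return s.lower() in _TRUTHY or s.lower() in _FALSY

assert all(verify_boolean_sketch(s) for s in ('Yes', 'TRUE', 'On', 'nO', 'OFF'))
assert not any(verify_boolean_sketch(s) for s in ('', 'not', 'ONN', 'TRUETH', 'yess'))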
+
+def test_olist():
+ lst = utils.olist(['B', 'C', 'A'])
+ lst.append('f')
+ lst.append('aA')
+ lst.append('_')
+ assert 'aa' in lst
+ assert 'a' in lst
+ assert list(lst) == ['b', 'c', 'a', 'f', 'aa', '_']
+
+
+def test_add_sudo():
+ tmpuser = config.core.user
+ try:
+ config.core.user = 'root'
+ assert utils.add_sudo('ls').startswith('sudo')
+ config.core.user = ''
+ assert utils.add_sudo('ls') == 'ls'
+ finally:
+ config.core.user = tmpuser
+
+
+def test_str2tmp():
+ txt = "This is a test string"
+ filename = utils.str2tmp(txt)
+ assert os.path.isfile(filename)
+ assert open(filename).read() == txt + "\n"
+ assert utils.file2str(filename) == txt
+ os.unlink(filename)
+
+
+@mock.patch('logging.Logger.error')
+def test_sanity(mock_error):
+ sane_paths = ['foo/bar', 'foo', '/foo/bar', 'foo0',
+ 'foo_bar', 'foo-bar', '0foo', '.foo',
+ 'foo.bar']
+ insane_paths = ['#foo', 'foo?', 'foo*', 'foo$', 'foo[bar]',
+ 'foo`', "foo'", 'foo/*']
+ for p in sane_paths:
+ assert utils.is_path_sane(p)
+ for p in insane_paths:
+ assert not utils.is_path_sane(p)
+ sane_filenames = ['foo', '0foo', '0', '.foo']
+ insane_filenames = ['foo/bar']
+ for p in sane_filenames:
+ assert utils.is_filename_sane(p)
+ for p in insane_filenames:
+ assert not utils.is_filename_sane(p)
+ sane_names = ['foo']
+ insane_names = ["f'o"]
+ for n in sane_names:
+ assert utils.is_name_sane(n)
+ for n in insane_names:
+ assert not utils.is_name_sane(n)
+
+
+def test_nvpairs2dict():
+ assert utils.nvpairs2dict(['a=b', 'c=d']) == {'a': 'b', 'c': 'd'}
+ assert utils.nvpairs2dict(['a=b=c', 'c=d']) == {'a': 'b=c', 'c': 'd'}
+ assert utils.nvpairs2dict(['a']) == {'a': None}
+
+
+def test_validity():
+ assert utils.is_id_valid('foo0')
+ assert not utils.is_id_valid('0foo')
+
+
+def test_msec():
+ assert utils.crm_msec('1ms') == 1
+ assert utils.crm_msec('1s') == 1000
+ assert utils.crm_msec('1us') == 0
+ assert utils.crm_msec('1') == 1000
+ assert utils.crm_msec('1m') == 60*1000
+ assert utils.crm_msec('1h') == 60*60*1000
+
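# The conversions test_msec pins down suggest the unit table below. This
# re-implementation is illustrative only; crmsh.utils.crm_msec is the real
# parser and accepts more unit spellings than shown. Bare numbers mean
# seconds, and sub-millisecond values truncate to 0:
import re

_MSEC_FACTOR = {'': 1000, 's': 1000, 'sec': 1000, 'ms': 1, 'msec': 1,
                'us': 0.001, 'usec': 0.001, 'm': 60000, 'min': 60000,
                'h': 3600000, 'hr': 3600000}

def crm_msec_sketch(t):
    m = re.match(r'\s*(\d+)\s*([a-z]*)', t.lower())
    return int(int(m.group(1)) * _MSEC_FACTOR[m.group(2)]) if m else -1

assert crm_msec_sketch('1us') == 0 and crm_msec_sketch('1') == 1000
assert crm_msec_sketch('1h') == 60 * 60 * 1000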
+
+def test_parse_sysconfig():
+ """
+ bsc#1129317: Fails on this line
+
+ FW_SERVICES_ACCEPT_EXT="0/0,tcp,22,,hitcount=3,blockseconds=60,recentname=ssh"
+ """
+ s = '''
+FW_SERVICES_ACCEPT_EXT="0/0,tcp,22,,hitcount=3,blockseconds=60,recentname=ssh"
+'''
+
+ fd, fname = tmpfiles.create()
+ with open(fname, 'w') as f:
+ f.write(s)
+ sc = utils.parse_sysconfig(fname)
+ assert ("FW_SERVICES_ACCEPT_EXT" in sc)
+
+def test_sysconfig_set():
+ s = '''
+FW_SERVICES_ACCEPT_EXT="0/0,tcp,22,,hitcount=3,blockseconds=60,recentname=ssh"
+'''
+ fd, fname = tmpfiles.create()
+ with open(fname, 'w') as f:
+ f.write(s)
+ utils.sysconfig_set(fname, FW_SERVICES_ACCEPT_EXT="foo=bar", FOO="bar")
+ sc = utils.parse_sysconfig(fname)
+ assert (sc.get("FW_SERVICES_ACCEPT_EXT") == "foo=bar")
+ assert (sc.get("FOO") == "bar")
+
+def test_sysconfig_set_bsc1145823():
+ s = '''# this is test
+#age=1000
+'''
+ fd, fname = tmpfiles.create()
+ with open(fname, 'w') as f:
+ f.write(s)
+ utils.sysconfig_set(fname, age="100")
+ sc = utils.parse_sysconfig(fname)
+ assert (sc.get("age") == "100")
+
+@mock.patch("crmsh.utils.IP.is_ipv6")
+@mock.patch("socket.socket")
+@mock.patch("crmsh.utils.closing")
+def test_check_port_open_false(mock_closing, mock_socket, mock_is_ipv6):
+ mock_is_ipv6.return_value = False
+ sock_inst = mock.Mock()
+ mock_socket.return_value = sock_inst
+ mock_closing.return_value.__enter__.return_value = sock_inst
+ sock_inst.connect_ex.return_value = 1
+
+ assert utils.check_port_open("10.10.10.1", 22) is False
+
+ mock_is_ipv6.assert_called_once_with("10.10.10.1")
+ mock_socket.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM)
+ mock_closing.assert_called_once_with(sock_inst)
+ sock_inst.connect_ex.assert_called_once_with(("10.10.10.1", 22))
+
+@mock.patch("crmsh.utils.IP.is_ipv6")
+@mock.patch("socket.socket")
+@mock.patch("crmsh.utils.closing")
+def test_check_port_open_true(mock_closing, mock_socket, mock_is_ipv6):
+ mock_is_ipv6.return_value = True
+ sock_inst = mock.Mock()
+ mock_socket.return_value = sock_inst
+ mock_closing.return_value.__enter__.return_value = sock_inst
+ sock_inst.connect_ex.return_value = 0
+
+ assert utils.check_port_open("2001:db8:10::7", 22) is True
+
+ mock_is_ipv6.assert_called_once_with("2001:db8:10::7")
+ mock_socket.assert_called_once_with(socket.AF_INET6, socket.SOCK_STREAM)
+ mock_closing.assert_called_once_with(sock_inst)
+ sock_inst.connect_ex.assert_called_once_with(("2001:db8:10::7", 22))
+
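# Shape of the probe the two tests above mock out, assumed from the mocked
# calls: crmsh.utils.check_port_open is the real helper, and it picks the
# address family via utils.IP.is_ipv6 rather than this ':' heuristic.
import socket
from contextlib import closing

def check_port_open_sketch(ip, port):
    family = socket.AF_INET6 if ':' in ip else socket.AF_INET
    with closing(socket.socket(family, socket.SOCK_STREAM)) as sock:
        sock.settimeout(3)  # timeout value is an assumption
        return sock.connect_ex((ip, port)) == 0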
+def test_valid_port():
+ assert utils.valid_port(1) is False
+ assert utils.valid_port(10000000) is False
+ assert utils.valid_port(1234) is True
+
+@mock.patch("crmsh.corosync.get_value")
+def test_is_qdevice_configured_false(mock_get_value):
+ mock_get_value.return_value = "ip"
+ assert utils.is_qdevice_configured() is False
+ mock_get_value.assert_called_once_with("quorum.device.model")
+
+@mock.patch("crmsh.corosync.get_value")
+def test_is_qdevice_configured_true(mock_get_value):
+ mock_get_value.return_value = "net"
+ assert utils.is_qdevice_configured() is True
+ mock_get_value.assert_called_once_with("quorum.device.model")
+
+@mock.patch("crmsh.corosync.get_value")
+def test_is_qdevice_tls_on_false(mock_get_value):
+ mock_get_value.return_value = "off"
+ assert utils.is_qdevice_tls_on() is False
+ mock_get_value.assert_called_once_with("quorum.device.net.tls")
+
+@mock.patch("crmsh.corosync.get_value")
+def test_is_qdevice_tls_on_true(mock_get_value):
+ mock_get_value.return_value = "on"
+ assert utils.is_qdevice_tls_on() is True
+ mock_get_value.assert_called_once_with("quorum.device.net.tls")
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout")
+def test_get_nodeinfo_from_cmaptool_return_none(mock_get_stdout):
+ mock_get_stdout.return_value = (1, None)
+ assert bool(utils.get_nodeinfo_from_cmaptool()) is False
+ mock_get_stdout.assert_called_once_with("corosync-cmapctl -b runtime.totem.pg.mrp.srp.members")
+
+@mock.patch("re.findall")
+@mock.patch("re.search")
+@mock.patch("crmsh.sh.ShellUtils.get_stdout")
+def test_get_nodeinfo_from_cmaptool(mock_get_stdout, mock_search, mock_findall):
+ mock_get_stdout.return_value = (0, 'runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(192.168.43.129)\nruntime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(192.168.43.128)')
+ match_inst1 = mock.Mock()
+ match_inst2 = mock.Mock()
+ mock_search.side_effect = [match_inst1, match_inst2]
+ match_inst1.group.return_value = '1'
+ match_inst2.group.return_value = '2'
+ mock_findall.side_effect = [["192.168.43.129"], ["192.168.43.128"]]
+
+ result = utils.get_nodeinfo_from_cmaptool()
+ assert result['1'] == ["192.168.43.129"]
+ assert result['2'] == ["192.168.43.128"]
+
+ mock_get_stdout.assert_called_once_with("corosync-cmapctl -b runtime.totem.pg.mrp.srp.members")
+ mock_search.assert_has_calls([
+ mock.call(r'members\.(.*)\.ip', 'runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(192.168.43.129)'),
+ mock.call(r'members\.(.*)\.ip', 'runtime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(192.168.43.128)')
+ ])
+ match_inst1.group.assert_called_once_with(1)
+ match_inst2.group.assert_called_once_with(1)
+ mock_findall.assert_has_calls([
+ mock.call(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', 'runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(192.168.43.129)'),
+ mock.call(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', 'runtime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(192.168.43.128)')
+ ])
+
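# A standalone restatement of the parsing exercised above: map each node id
# in the corosync-cmapctl members output to its IPv4 addresses (sketch only;
# crmsh.utils.get_nodeinfo_from_cmaptool is the real function):
import re

def parse_cmap_members_sketch(output):
    info = {}
    for line in output.splitlines():
        m = re.search(r'members\.(.*)\.ip', line)
        if m:
            info[m.group(1)] = re.findall(r'[0-9]{1,3}(?:\.[0-9]{1,3}){3}', line)
    return info

sample = ('runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(192.168.43.129)\n'
          'runtime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(192.168.43.128)')
assert parse_cmap_members_sketch(sample) == {'1': ['192.168.43.129'],
                                             '2': ['192.168.43.128']}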
+@mock.patch("crmsh.utils.get_nodeinfo_from_cmaptool")
+@mock.patch("crmsh.service_manager.ServiceManager.service_is_active")
+def test_valid_nodeid_false_service_not_active(mock_is_active, mock_nodeinfo):
+ mock_is_active.return_value = False
+ assert utils.valid_nodeid("3") is False
+ mock_is_active.assert_called_once_with('corosync.service')
+ mock_nodeinfo.assert_not_called()
+
+@mock.patch("crmsh.utils.get_nodeinfo_from_cmaptool")
+@mock.patch("crmsh.service_manager.ServiceManager.service_is_active")
+def test_valid_nodeid_false(mock_is_active, mock_nodeinfo):
+ mock_is_active.return_value = True
+ mock_nodeinfo.return_value = {'1': ["10.10.10.1"], "2": ["20.20.20.2"]}
+ assert utils.valid_nodeid("3") is False
+ mock_is_active.assert_called_once_with('corosync.service')
+ mock_nodeinfo.assert_called_once_with()
+
+@mock.patch("crmsh.utils.get_nodeinfo_from_cmaptool")
+@mock.patch("crmsh.service_manager.ServiceManager.service_is_active")
+def test_valid_nodeid_true(mock_is_active, mock_nodeinfo):
+ mock_is_active.return_value = True
+ mock_nodeinfo.return_value = {'1': ["10.10.10.1"], "2": ["20.20.20.2"]}
+ assert utils.valid_nodeid("2") is True
+ mock_is_active.assert_called_once_with('corosync.service')
+ mock_nodeinfo.assert_called_once_with()
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_aws_false(mock_run):
+ mock_run.side_effect = ["test", "test"]
+ assert utils.detect_aws() is False
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-version"),
+ mock.call("dmidecode -s system-manufacturer")
+ ])
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_aws_xen(mock_run):
+ mock_run.side_effect = ["4.2.amazon", "Xen"]
+ assert utils.detect_aws() is True
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-version"),
+ mock.call("dmidecode -s system-manufacturer")
+ ])
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_aws_kvm(mock_run):
+ mock_run.side_effect = ["Not Specified", "Amazon EC2"]
+ assert utils.detect_aws() is True
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-version"),
+ mock.call("dmidecode -s system-manufacturer")
+ ])
+
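# The three detect_aws cases above reduce to this decision, reconstructed
# from the mocked dmidecode output rather than copied from crmsh.utils:
# a Xen guest with an "amazon" system version, or a KVM guest manufactured
# as "Amazon EC2", counts as AWS.
def detect_aws_sketch(system_version, system_manufacturer):
    xen_guest = 'amazon' in system_version and system_manufacturer == 'Xen'
    kvm_guest = system_manufacturer == 'Amazon EC2'
    return xen_guest or kvm_guest

assert detect_aws_sketch('4.2.amazon', 'Xen')
assert detect_aws_sketch('Not Specified', 'Amazon EC2')
assert not detect_aws_sketch('test', 'test')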
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_azure_false(mock_run):
+ mock_run.side_effect = ["test", "test"]
+ assert utils.detect_azure() is False
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-manufacturer"),
+ mock.call("dmidecode -s chassis-asset-tag")
+ ])
+
+@mock.patch("crmsh.utils._cloud_metadata_request")
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_azure_microsoft_corporation(mock_run, mock_request):
+ mock_run.side_effect = ["microsoft corporation", "test"]
+ mock_request.return_value = "data"
+ assert utils.detect_azure() is True
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-manufacturer"),
+ mock.call("dmidecode -s chassis-asset-tag")
+ ])
+
+@mock.patch("crmsh.utils._cloud_metadata_request")
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_azure_chassis(mock_run, mock_request):
+ mock_run.side_effect = ["test", "7783-7084-3265-9085-8269-3286-77"]
+ mock_request.return_value = "data"
+ assert utils.detect_azure() is True
+ mock_run.assert_has_calls([
+ mock.call("dmidecode -s system-manufacturer"),
+ mock.call("dmidecode -s chassis-asset-tag")
+ ])
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_gcp_false(mock_run):
+ mock_run.return_value = "test"
+ assert utils.detect_gcp() is False
+ mock_run.assert_called_once_with("dmidecode -s bios-vendor")
+
+@mock.patch("crmsh.utils._cloud_metadata_request")
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_detect_gcp(mock_run, mock_request):
+ mock_run.return_value = "Google instance"
+ mock_request.return_value = "data"
+ assert utils.detect_gcp() is True
+ mock_run.assert_called_once_with("dmidecode -s bios-vendor")
+
+@mock.patch("crmsh.utils.is_program")
+def test_detect_cloud_no_cmd(mock_is_program):
+ mock_is_program.return_value = False
+ assert utils.detect_cloud() is None
+ mock_is_program.assert_called_once_with("dmidecode")
+
+@mock.patch("crmsh.utils.detect_aws")
+@mock.patch("crmsh.utils.is_program")
+def test_detect_cloud_aws(mock_is_program, mock_aws):
+ mock_is_program.return_value = True
+ mock_aws.return_value = True
+ assert utils.detect_cloud() == constants.CLOUD_AWS
+ mock_is_program.assert_called_once_with("dmidecode")
+ mock_aws.assert_called_once_with()
+
+@mock.patch("crmsh.utils.detect_azure")
+@mock.patch("crmsh.utils.detect_aws")
+@mock.patch("crmsh.utils.is_program")
+def test_detect_cloud_azure(mock_is_program, mock_aws, mock_azure):
+ mock_is_program.return_value = True
+ mock_aws.return_value = False
+ mock_azure.return_value = True
+ assert utils.detect_cloud() == constants.CLOUD_AZURE
+ mock_is_program.assert_called_once_with("dmidecode")
+ mock_aws.assert_called_once_with()
+ mock_azure.assert_called_once_with()
+
+@mock.patch("crmsh.utils.detect_gcp")
+@mock.patch("crmsh.utils.detect_azure")
+@mock.patch("crmsh.utils.detect_aws")
+@mock.patch("crmsh.utils.is_program")
+def test_detect_cloud_gcp(mock_is_program, mock_aws, mock_azure, mock_gcp):
+ mock_is_program.return_value = True
+ mock_aws.return_value = False
+ mock_azure.return_value = False
+ mock_gcp.return_value = True
+ assert utils.detect_cloud() == constants.CLOUD_GCP
+ mock_is_program.assert_called_once_with("dmidecode")
+ mock_aws.assert_called_once_with()
+ mock_azure.assert_called_once_with()
+ mock_gcp.assert_called_once_with()
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout")
+def test_interface_choice(mock_get_stdout):
+ ip_a_output = """
+1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
+ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+ inet 127.0.0.1/8 scope host lo
+ valid_lft forever preferred_lft forever
+ inet6 ::1/128 scope host
+ valid_lft forever preferred_lft forever
+2: enp1s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
+ link/ether 52:54:00:9e:1b:4f brd ff:ff:ff:ff:ff:ff
+ inet 192.168.122.241/24 brd 192.168.122.255 scope global enp1s0
+ valid_lft forever preferred_lft forever
+ inet6 fe80::5054:ff:fe9e:1b4f/64 scope link
+ valid_lft forever preferred_lft forever
+3: br-933fa0e1438c: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
+ link/ether 9e:fe:24:df:59:49 brd ff:ff:ff:ff:ff:ff
+ inet 10.10.10.1/24 brd 10.10.10.255 scope global br-933fa0e1438c
+ valid_lft forever preferred_lft forever
+ inet6 fe80::9cfe:24ff:fedf:5949/64 scope link
+ valid_lft forever preferred_lft forever
+4: veth3fff6e9@if7: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
+ link/ether 1e:2c:b3:73:6b:42 brd ff:ff:ff:ff:ff:ff link-netnsid 0
+ inet6 fe80::1c2c:b3ff:fe73:6b42/64 scope link
+ valid_lft forever preferred_lft forever
+"""
+ mock_get_stdout.return_value = (0, ip_a_output)
+ assert utils.interface_choice() == ["enp1s0", "br-933fa0e1438c", "veth3fff6e9"]
+ mock_get_stdout.assert_called_once_with("ip a")
+
+
+class TestIP(unittest.TestCase):
+ """
+ Unitary tests for class utils.IP
+ """
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.ip_inst = utils.IP("10.10.10.1")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('ipaddress.ip_address')
+ def test_ip_address(self, mock_ip_address):
+ mock_ip_address_inst = mock.Mock()
+ mock_ip_address.return_value = mock_ip_address_inst
+ self.ip_inst.ip_address
+ mock_ip_address.assert_called_once_with("10.10.10.1")
+
+ @mock.patch('crmsh.utils.IP.ip_address', new_callable=mock.PropertyMock)
+ def test_version(self, mock_ip_address):
+ mock_ip_address_inst = mock.Mock(version=4)
+ mock_ip_address.return_value = mock_ip_address_inst
+ res = self.ip_inst.version
+ self.assertEqual(res, mock_ip_address_inst.version)
+ mock_ip_address.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.IP.ip_address', new_callable=mock.PropertyMock)
+ def test_is_mcast(self, mock_ip_address):
+ mock_ip_address_inst = mock.Mock(is_multicast=False)
+ mock_ip_address.return_value = mock_ip_address_inst
+ res = utils.IP.is_mcast("10.10.10.1")
+ self.assertEqual(res, False)
+ mock_ip_address.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.IP.version', new_callable=mock.PropertyMock)
+ def test_is_ipv6(self, mock_version):
+ mock_version.return_value = 4
+ res = utils.IP.is_ipv6("10.10.10.1")
+ self.assertEqual(res, False)
+ mock_version.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.IP.ip_address', new_callable=mock.PropertyMock)
+ def test_is_loopback(self, mock_ip_address):
+ mock_ip_address_inst = mock.Mock(is_loopback=False)
+ mock_ip_address.return_value = mock_ip_address_inst
+ res = self.ip_inst.is_loopback
+ self.assertEqual(res, mock_ip_address_inst.is_loopback)
+ mock_ip_address.assert_called_once_with()
+
+
+class TestInterface(unittest.TestCase):
+ """
+ Unitary tests for class utils.Interface
+ """
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.interface = utils.Interface("10.10.10.123/24")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_ip_with_mask(self):
+ assert self.interface.ip_with_mask == "10.10.10.123/24"
+
+ @mock.patch('ipaddress.ip_interface')
+ def test_ip_interface(self, mock_ip_interface):
+ mock_ip_interface_inst = mock.Mock()
+ mock_ip_interface.return_value = mock_ip_interface_inst
+ self.interface.ip_interface
+ mock_ip_interface.assert_called_once_with("10.10.10.123/24")
+
+ @mock.patch('crmsh.utils.Interface.ip_interface', new_callable=mock.PropertyMock)
+ def test_network(self, mock_ip_interface):
+ mock_ip_interface_inst = mock.Mock()
+ mock_ip_interface.return_value = mock_ip_interface_inst
+ mock_ip_interface_inst.network = mock.Mock(network_address="10.10.10.0")
+ assert self.interface.network == "10.10.10.0"
+ mock_ip_interface.assert_called_once_with()
+
+
+class TestInterfacesInfo(unittest.TestCase):
+ """
+ Unitary tests for class utils.InterfacesInfo
+ """
+
+ network_output_error = """1: lo inet 127.0.0.1/8 scope host lo\ valid_lft forever preferred_lft forever
+2: enp1s0 inet 192.168.122.241/24 brd 192.168.122.255 scope global enp1s0
+61: tun0 inet 10.163.45.46 peer 10.163.45.45/32 scope global tun0"""
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.interfaces_info = utils.InterfacesInfo()
+ self.interfaces_info_with_second_hb = utils.InterfacesInfo(second_heartbeat=True)
+ self.interfaces_info_with_custom_nic = utils.InterfacesInfo(second_heartbeat=True, custom_nic_list=['eth1'])
+ self.interfaces_info_with_wrong_nic = utils.InterfacesInfo(custom_nic_list=['eth7'])
+ self.interfaces_info_fake = utils.InterfacesInfo()
+ self.interfaces_info_fake._nic_info_dict = {
+ "eth0": [mock.Mock(ip="10.10.10.1", network="10.10.10.0"), mock.Mock(ip="10.10.10.2", network="10.10.10.0")],
+ "eth1": [mock.Mock(ip="20.20.20.1", network="20.20.20.0")]
+ }
+ self.interfaces_info_fake._default_nic_list = ["eth7"]
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_interfaces_info_no_address(self, mock_run):
+ only_lo = "1: lo inet 127.0.0.1/8 scope host lo\ valid_lft forever preferred_lft forever"
+ mock_run.return_value = (0, only_lo, None)
+ with self.assertRaises(ValueError) as err:
+ self.interfaces_info.get_interfaces_info()
+ self.assertEqual("No address configured", str(err.exception))
+ mock_run.assert_called_once_with("ip -4 -o addr show")
+
+ @mock.patch('crmsh.utils.Interface')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_interfaces_info_one_addr(self, mock_run, mock_interface):
+ mock_run.return_value = (0, self.network_output_error, None)
+ mock_interface_inst_1 = mock.Mock(is_loopback=True, is_link_local=False)
+ mock_interface_inst_2 = mock.Mock(is_loopback=False, is_link_local=False)
+ mock_interface.side_effect = [mock_interface_inst_1, mock_interface_inst_2]
+
+ with self.assertRaises(ValueError) as err:
+ self.interfaces_info_with_second_hb.get_interfaces_info()
+ self.assertEqual("Cannot configure second heartbeat, since only one address is available", str(err.exception))
+
+ mock_run.assert_called_once_with("ip -4 -o addr show")
+ mock_interface.assert_has_calls([
+ mock.call("127.0.0.1/8"),
+ mock.call("192.168.122.241/24")
+ ])
+
+ def test_nic_list(self):
+ res = self.interfaces_info_fake.nic_list
+ self.assertEqual(res, ["eth0", "eth1"])
+
+ def test_interface_list(self):
+ res = self.interfaces_info_fake.interface_list
+ assert len(res) == 3
+
+ @mock.patch('crmsh.utils.InterfacesInfo.interface_list', new_callable=mock.PropertyMock)
+ def test_ip_list(self, mock_interface_list):
+ mock_interface_list.return_value = [
+ mock.Mock(ip="10.10.10.1"),
+ mock.Mock(ip="10.10.10.2")
+ ]
+ res = self.interfaces_info_fake.ip_list
+ self.assertEqual(res, ["10.10.10.1", "10.10.10.2"])
+ mock_interface_list.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.InterfacesInfo.ip_list', new_callable=mock.PropertyMock)
+ @mock.patch('crmsh.utils.InterfacesInfo.get_interfaces_info')
+ def test_get_local_ip_list(self, mock_get_info, mock_ip_list):
+ mock_ip_list.return_value = ["10.10.10.1", "10.10.10.2"]
+ res = utils.InterfacesInfo.get_local_ip_list(False)
+ self.assertEqual(res, mock_ip_list.return_value)
+ mock_get_info.assert_called_once_with()
+ mock_ip_list.assert_called_once_with()
+
+ @mock.patch('crmsh.utils.InterfacesInfo.ip_list', new_callable=mock.PropertyMock)
+ @mock.patch('crmsh.utils.IP.is_ipv6')
+ @mock.patch('crmsh.utils.InterfacesInfo.get_interfaces_info')
+ def test_ip_in_local(self, mock_get_info, mock_is_ipv6, mock_ip_list):
+ mock_is_ipv6.return_value = False
+ mock_ip_list.return_value = ["10.10.10.1", "10.10.10.2"]
+ res = utils.InterfacesInfo.ip_in_local("10.10.10.1")
+ assert res is True
+ mock_get_info.assert_called_once_with()
+ mock_ip_list.assert_called_once_with()
+ mock_is_ipv6.assert_called_once_with("10.10.10.1")
+
+ @mock.patch('crmsh.utils.InterfacesInfo.interface_list', new_callable=mock.PropertyMock)
+ def test_network_list(self, mock_interface_list):
+ mock_interface_list.return_value = [
+ mock.Mock(network="10.10.10.0"),
+ mock.Mock(network="20.20.20.0")
+ ]
+ res = self.interfaces_info.network_list
+ self.assertEqual(res, list(set(["10.10.10.0", "20.20.20.0"])))
+ mock_interface_list.assert_called_once_with()
+
+ def test_nic_first_ip(self):
+ res = self.interfaces_info_fake._nic_first_ip("eth0")
+ self.assertEqual(res, "10.10.10.1")
+
+ @mock.patch('crmsh.utils.InterfacesInfo.nic_list', new_callable=mock.PropertyMock)
+ @mock.patch('logging.Logger.warning')
+ @mock.patch('crmsh.utils.InterfacesInfo.get_interfaces_info')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_default_nic_list_from_route_no_default(self, mock_run, mock_get_interfaces_info, mock_warn, mock_nic_list):
+ output = """10.10.10.0/24 dev eth1 proto kernel scope link src 10.10.10.51
+ 20.20.20.0/24 dev eth2 proto kernel scope link src 20.20.20.51"""
+ mock_run.return_value = (0, output, None)
+ mock_nic_list.side_effect = [["eth0", "eth1"], ["eth0", "eth1"]]
+
+ res = self.interfaces_info.get_default_nic_list_from_route()
+ self.assertEqual(res, ["eth0"])
+
+ mock_run.assert_called_once_with("ip -o route show")
+ mock_warn.assert_called_once_with("No default route configured. Using the first found nic")
+ mock_nic_list.assert_has_calls([mock.call(), mock.call()])
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_default_nic_list_from_route(self, mock_run):
+ output = """default via 192.168.122.1 dev eth8 proto dhcp
+ 10.10.10.0/24 dev eth1 proto kernel scope link src 10.10.10.51
+ 20.20.20.0/24 dev eth2 proto kernel scope link src 20.20.20.51
+ 192.168.122.0/24 dev eth8 proto kernel scope link src 192.168.122.120"""
+ mock_run.return_value = (0, output, None)
+
+ res = self.interfaces_info.get_default_nic_list_from_route()
+ self.assertEqual(res, ["eth8"])
+
+ mock_run.assert_called_once_with("ip -o route show")
+
+ @mock.patch('crmsh.utils.InterfacesInfo.nic_list', new_callable=mock.PropertyMock)
+ def test_get_default_ip_list_failed_detect(self, mock_nic_list):
+ mock_nic_list.side_effect = [["eth0", "eth1"], ["eth0", "eth1"]]
+
+ with self.assertRaises(ValueError) as err:
+ self.interfaces_info_with_wrong_nic.get_default_ip_list()
+ self.assertEqual("Failed to detect IP address for eth7", str(err.exception))
+
+ mock_nic_list.assert_has_calls([mock.call(), mock.call()])
+
+ @mock.patch('crmsh.utils.InterfacesInfo._nic_first_ip')
+ @mock.patch('crmsh.utils.InterfacesInfo.nic_list', new_callable=mock.PropertyMock)
+ def test_get_default_ip_list(self, mock_nic_list, mock_first_ip):
+ mock_nic_list.side_effect = [["eth0", "eth1"], ["eth0", "eth1"], ["eth0", "eth1"]]
+ mock_first_ip.side_effect = ["10.10.10.1", "20.20.20.1"]
+
+ res = self.interfaces_info_with_custom_nic.get_default_ip_list()
+ self.assertEqual(res, ["10.10.10.1", "20.20.20.1"])
+
+ mock_nic_list.assert_has_calls([mock.call(), mock.call(), mock.call()])
+ mock_first_ip.assert_has_calls([mock.call("eth1"), mock.call("eth0")])
+
+
+@mock.patch("crmsh.utils.get_nodeid_from_name")
+def test_get_iplist_from_name_no_nodeid(mock_get_nodeid):
+ mock_get_nodeid.return_value = None
+ res = utils.get_iplist_from_name("test")
+ assert res == []
+ mock_get_nodeid.assert_called_once_with("test")
+
+
+@mock.patch("crmsh.utils.get_nodeinfo_from_cmaptool")
+@mock.patch("crmsh.utils.get_nodeid_from_name")
+def test_get_iplist_from_name_no_nodeinfo(mock_get_nodeid, mock_get_nodeinfo):
+ mock_get_nodeid.return_value = "1"
+ mock_get_nodeinfo.return_value = None
+ res = utils.get_iplist_from_name("test")
+ assert res == []
+ mock_get_nodeid.assert_called_once_with("test")
+ mock_get_nodeinfo.assert_called_once_with()
+
+
+@mock.patch("crmsh.utils.get_nodeinfo_from_cmaptool")
+@mock.patch("crmsh.utils.get_nodeid_from_name")
+def test_get_iplist_from_name(mock_get_nodeid, mock_get_nodeinfo):
+ mock_get_nodeid.return_value = "1"
+ mock_get_nodeinfo.return_value = {"1": ["10.10.10.1"], "2": ["10.10.10.2"]}
+ res = utils.get_iplist_from_name("test")
+ assert res == ["10.10.10.1"]
+ mock_get_nodeid.assert_called_once_with("test")
+ mock_get_nodeinfo.assert_called_once_with()
+
+
+@mock.patch("crmsh.sh.ShellUtils.get_stdout_stderr")
+def test_ping_node(mock_run):
+ mock_run.return_value = (1, None, "error data")
+ with pytest.raises(ValueError) as err:
+ utils.ping_node("node_unreachable")
+ assert str(err.value) == 'host "node_unreachable" is unreachable: error data'
+ mock_run.assert_called_once_with("ping -c 1 node_unreachable")
+
+
+def test_calculate_quorate_status():
+ assert utils.calculate_quorate_status(3, 2) is True
+ assert utils.calculate_quorate_status(3, 1) is False
+
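# Majority rule fixed by the test above: a partition is quorate when its
# votes strictly exceed half of the expected votes (a sketch of
# crmsh.utils.calculate_quorate_status, equivalent for positive integers):
def quorate_sketch(expected_votes, actual_votes):
    return actual_votes > expected_votes // 2

assert quorate_sketch(3, 2) is True
assert quorate_sketch(3, 1) is False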
+
+@mock.patch("crmsh.sh.ClusterShell.get_stdout_or_raise_error")
+def test_get_quorum_votes_dict(mock_run):
+ mock_run.return_value = """
+Votequorum information
+----------------------
+Expected votes: 1
+Highest expected: 1
+Total votes: 1
+Quorum: 1
+Flags: Quorate
+ """
+ res = utils.get_quorum_votes_dict()
+ assert res == {'Expected': '1', 'Total': '1'}
+ mock_run.assert_called_once_with("corosync-quorumtool -s", None, success_exit_status={0, 2})
+
+
+def test_re_split_string():
+ assert utils.re_split_string('[; ]', "/dev/sda1; /dev/sdb1 ; ") == ["/dev/sda1", "/dev/sdb1"]
+ assert utils.re_split_string('[; ]', "/dev/sda1 ") == ["/dev/sda1"]
+
+
+@mock.patch('crmsh.utils.get_dev_info')
+def test_has_dev_partitioned(mock_get_dev_info):
+ mock_get_dev_info.return_value = """
+disk
+part
+ """
+ res = utils.has_dev_partitioned("/dev/sda1")
+ assert res is True
+ mock_get_dev_info.assert_called_once_with("/dev/sda1", "NAME", peer=None)
+
+
+@mock.patch('crmsh.utils.get_dev_uuid')
+def test_compare_uuid_with_peer_dev_cannot_find_local(mock_get_dev_uuid):
+ mock_get_dev_uuid.return_value = ""
+ with pytest.raises(ValueError) as err:
+ utils.compare_uuid_with_peer_dev(["/dev/sdb1"], "node2")
+ assert str(err.value) == "Cannot find UUID for /dev/sdb1 on local"
+ mock_get_dev_uuid.assert_called_once_with("/dev/sdb1")
+
+
+@mock.patch('crmsh.utils.get_dev_uuid')
+def test_compare_uuid_with_peer_dev_cannot_find_peer(mock_get_dev_uuid):
+ mock_get_dev_uuid.side_effect = ["1234", ""]
+ with pytest.raises(ValueError) as err:
+ utils.compare_uuid_with_peer_dev(["/dev/sdb1"], "node2")
+ assert str(err.value) == "Cannot find UUID for /dev/sdb1 on node2"
+ mock_get_dev_uuid.assert_has_calls([
+ mock.call("/dev/sdb1"),
+ mock.call("/dev/sdb1", "node2")
+ ])
+
+
+@mock.patch('crmsh.utils.get_dev_uuid')
+def test_compare_uuid_with_peer_dev(mock_get_dev_uuid):
+ mock_get_dev_uuid.side_effect = ["1234", "5678"]
+ with pytest.raises(ValueError) as err:
+ utils.compare_uuid_with_peer_dev(["/dev/sdb1"], "node2")
+ assert str(err.value) == "UUID of /dev/sdb1 not same with peer node2"
+ mock_get_dev_uuid.assert_has_calls([
+ mock.call("/dev/sdb1"),
+ mock.call("/dev/sdb1", "node2")
+ ])
+
+
+@mock.patch('crmsh.utils.get_dev_info')
+def test_is_dev_used_for_lvm(mock_dev_info):
+ mock_dev_info.return_value = "lvm"
+ res = utils.is_dev_used_for_lvm("/dev/sda1")
+ assert res is True
+ mock_dev_info.assert_called_once_with("/dev/sda1", "TYPE", peer=None)
+
+
+@mock.patch('crmsh.utils.get_dev_info')
+def test_is_dev_a_plain_raw_disk_or_partition(mock_dev_info):
+ mock_dev_info.return_value = "raid1\nlvm"
+ res = utils.is_dev_a_plain_raw_disk_or_partition("/dev/md127")
+ assert res is False
+ mock_dev_info.assert_called_once_with("/dev/md127", "TYPE", peer=None)
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_dev_info(mock_run):
+ mock_run.return_value = "data"
+ res = utils.get_dev_info("/dev/sda1", "TYPE")
+ assert res == "data"
+ mock_run.assert_called_once_with("lsblk -fno TYPE /dev/sda1", None)
+
+
+@mock.patch('crmsh.utils.get_dev_info')
+def test_get_dev_fs_type(mock_get_info):
+ mock_get_info.return_value = "data"
+ res = utils.get_dev_fs_type("/dev/sda1")
+ assert res == "data"
+ mock_get_info.assert_called_once_with("/dev/sda1", "FSTYPE", peer=None)
+
+
+@mock.patch('crmsh.utils.get_dev_info')
+def test_get_dev_uuid(mock_get_info):
+ mock_get_info.return_value = "uuid"
+ res = utils.get_dev_uuid("/dev/sda1")
+ assert res == "uuid"
+ mock_get_info.assert_called_once_with("/dev/sda1", "UUID", peer=None)
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_pe_number_except(mock_run):
+ mock_run.return_value = "data"
+ with pytest.raises(ValueError) as err:
+ utils.get_pe_number("vg1")
+ assert str(err.value) == "Cannot find PE on VG(vg1)"
+ mock_run.assert_called_once_with("vgdisplay vg1")
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_pe_number(mock_run):
+ mock_run.return_value = """
+PE Size 4.00 MiB
+Total PE 1534
+Alloc PE / Size 1534 / 5.99 GiB
+ """
+ res = utils.get_pe_number("vg1")
+ assert res == 1534
+ mock_run.assert_called_once_with("vgdisplay vg1")
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_get_all_vg_name(mock_run):
+ mock_run.return_value = """
+--- Volume group ---
+ VG Name ocfs2-vg
+ System ID
+ """
+ res = utils.get_all_vg_name()
+ assert res == ["ocfs2-vg"]
+ mock_run.assert_called_once_with("vgdisplay")
+
+
+@mock.patch('crmsh.utils.randomword')
+def test_gen_unused_id(mock_rand):
+ mock_rand.return_value = "1234xxxx"
+ res = utils.gen_unused_id(["test-id"], "test-id")
+ assert res == "test-id-1234xxxx"
+ mock_rand.assert_called_once_with(6)
+
+
+@mock.patch('random.choice')
+def test_randomword(mock_rand):
+ import string
+ mock_rand.side_effect = ['z', 'f', 'k', 'e', 'c', 'd']
+ res = utils.randomword()
+ assert res == "zfkecd"
+ mock_rand.assert_has_calls([mock.call(string.ascii_lowercase) for x in range(6)])
+
+
+@mock.patch('crmsh.cibconfig.cib_factory')
+def test_all_exist_id(mock_cib):
+ mock_cib.refresh = mock.Mock()
+ mock_cib.id_list = mock.Mock()
+ mock_cib.id_list.return_value = ['1', '2']
+ res = utils.all_exist_id()
+ assert res == ['1', '2']
+ mock_cib.id_list.assert_called_once_with()
+ mock_cib.refresh.assert_called_once_with()
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_has_mount_point_used(mock_run):
+ mock_run.return_value = """
+/dev/vda2 on /usr/local type btrfs (rw,relatime,space_cache,subvolid=259,subvol=/@/usr/local)
+/dev/vda2 on /opt type btrfs (rw,relatime,space_cache,subvolid=263,subvol=/@/opt)
+/dev/vda2 on /var/lib/docker/btrfs type btrfs (rw,relatime,space_cache,subvolid=258,subvol=/@/var)
+ """
+ res = utils.has_mount_point_used("/opt")
+ assert res is True
+ mock_run.assert_called_once_with("mount")
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_has_disk_mounted(mock_run):
+ mock_run.return_value = """
+/dev/vda2 on /usr/local type btrfs (rw,relatime,space_cache,subvolid=259,subvol=/@/usr/local)
+/dev/vda2 on /opt type btrfs (rw,relatime,space_cache,subvolid=263,subvol=/@/opt)
+/dev/vda2 on /var/lib/docker/btrfs type btrfs (rw,relatime,space_cache,subvolid=258,subvol=/@/var)
+ """
+ res = utils.has_disk_mounted("/dev/vda2")
+ assert res is True
+ mock_run.assert_called_once_with("mount")
+
+
+@mock.patch('crmsh.sbd.SBDManager.is_using_diskless_sbd')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_has_stonith_running(mock_run, mock_diskless):
+ mock_run.return_value = """
+stonith-sbd
+1 fence device found
+ """
+ mock_diskless.return_value = True
+ res = utils.has_stonith_running()
+ assert res is True
+ mock_run.assert_called_once_with("stonith_admin -L")
+ mock_diskless.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.S_ISBLK')
+@mock.patch('os.stat')
+def test_is_block_device_error(mock_stat, mock_isblk):
+ mock_stat_inst = mock.Mock(st_mode=12345)
+ mock_stat.return_value = mock_stat_inst
+ mock_isblk.side_effect = OSError
+ res = utils.is_block_device("/dev/sda1")
+ assert res is False
+ mock_stat.assert_called_once_with("/dev/sda1")
+ mock_isblk.assert_called_once_with(12345)
+
+
+@mock.patch('crmsh.utils.S_ISBLK')
+@mock.patch('os.stat')
+def test_is_block_device(mock_stat, mock_isblk):
+ mock_stat_inst = mock.Mock(st_mode=12345)
+ mock_stat.return_value = mock_stat_inst
+ mock_isblk.return_value = True
+ res = utils.is_block_device("/dev/sda1")
+ assert res is True
+ mock_stat.assert_called_once_with("/dev/sda1")
+ mock_isblk.assert_called_once_with(12345)
+
+
+@mock.patch('crmsh.utils.ping_node')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_check_all_nodes_reachable(mock_run, mock_ping):
+ mock_run.return_value = "1084783297 15sp2-1 member"
+ utils.check_all_nodes_reachable()
+ mock_run.assert_called_once_with("crm_node -l")
+ mock_ping.assert_called_once_with("15sp2-1")
+
+
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_detect_virt(mock_run):
+ mock_run.return_value = (0, None, None)
+ assert utils.detect_virt() is True
+ mock_run.assert_called_once_with("systemd-detect-virt")
+
+
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_is_standby(mock_run):
+ mock_run.return_value = """
+Node List:
+* Node 15sp2-1: standby
+ """
+ assert utils.is_standby("15sp2-1") is True
+ mock_run.assert_called_once_with("crm_mon -1")
+
+
+@mock.patch('crmsh.sh.cluster_shell')
+def test_get_dlm_option_dict(mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error.return_value = """
+key1=value1
+key2=value2
+ """
+ res_dict = utils.get_dlm_option_dict()
+ assert res_dict == {
+ "key1": "value1",
+ "key2": "value2"
+ }
+ mock_run_inst.get_stdout_or_raise_error.assert_called_once_with("dlm_tool dump_config", None)
+
+
+@mock.patch('crmsh.utils.get_dlm_option_dict')
+def test_set_dlm_option_exception(mock_get_dict):
+ mock_get_dict.return_value = {
+ "key1": "value1",
+ "key2": "value2"
+ }
+ with pytest.raises(ValueError) as err:
+ utils.set_dlm_option(name="xin")
+ assert str(err.value) == '"name" is not dlm config option'
+
+
+@mock.patch('crmsh.sh.cluster_shell')
+@mock.patch('crmsh.utils.get_dlm_option_dict')
+def test_set_dlm_option(mock_get_dict, mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_get_dict.return_value = {
+ "key1": "value1",
+ "key2": "value2"
+ }
+ utils.set_dlm_option(key2="test")
+ mock_run_inst.get_stdout_or_raise_error.assert_called_once_with('dlm_tool set_config "key2=test"', None)
+
+
+@mock.patch('crmsh.utils.has_resource_configured')
+def test_is_dlm_configured(mock_configured):
+ mock_configured.return_value = True
+ assert utils.is_dlm_configured() is True
+ mock_configured.assert_called_once_with(constants.DLM_CONTROLD_RA, peer=None)
+
+
+@mock.patch('crmsh.sh.cluster_shell')
+def test_is_quorate_exception(mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error.return_value = "data"
+ with pytest.raises(ValueError) as err:
+ utils.is_quorate()
+ assert str(err.value) == "Failed to get quorate status from corosync-quorumtool"
+ mock_run_inst.get_stdout_or_raise_error.assert_called_once_with("corosync-quorumtool -s", None, success_exit_status={0, 2})
+
+
+@mock.patch('crmsh.sh.cluster_shell')
+def test_is_quorate(mock_run):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_stdout_or_raise_error.return_value = """
+Ring ID: 1084783297/440
+Quorate: Yes
+ """
+ assert utils.is_quorate() is True
+ mock_run_inst.get_stdout_or_raise_error.assert_called_once_with("corosync-quorumtool -s", None, success_exit_status={0, 2})
+
+
+@mock.patch('crmsh.utils.etree.fromstring')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_list_cluster_nodes_none(mock_run, mock_etree):
+ mock_run.return_value = (0, "data", None)
+ mock_etree.return_value = None
+ res = utils.list_cluster_nodes()
+ assert res is None
+ mock_run.assert_called_once_with(constants.CIB_QUERY, no_reg=False)
+ mock_etree.assert_called_once_with("data")
+
+
+@mock.patch('crmsh.utils.etree.fromstring')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_list_cluster_nodes_none_no_reg(mock_run, mock_etree):
+ mock_run.return_value = (0, "data", None)
+ mock_etree.return_value = None
+ res = utils.list_cluster_nodes(no_reg=True)
+ assert res is None
+ mock_run.assert_called_once_with(constants.CIB_QUERY, no_reg=True)
+ mock_etree.assert_called_once_with("data")
+
+
+@mock.patch('os.path.isfile')
+@mock.patch('os.getenv')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_list_cluster_nodes_cib_not_exist(mock_run, mock_env, mock_isfile):
+ mock_run.return_value = (1, None, None)
+ mock_env.return_value = constants.CIB_RAW_FILE
+ mock_isfile.return_value = False
+ res = utils.list_cluster_nodes()
+ assert res is None
+ mock_run.assert_called_once_with(constants.CIB_QUERY, no_reg=False)
+ mock_env.assert_called_once_with("CIB_file", constants.CIB_RAW_FILE)
+ mock_isfile.assert_called_once_with(constants.CIB_RAW_FILE)
+
+
+@mock.patch('crmsh.xmlutil.file2cib_elem')
+@mock.patch('os.path.isfile')
+@mock.patch('os.getenv')
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_list_cluster_nodes(mock_run, mock_env, mock_isfile, mock_file2elem):
+ mock_run.return_value = (1, None, None)
+ mock_env.return_value = constants.CIB_RAW_FILE
+ mock_isfile.return_value = True
+ mock_cib_inst = mock.Mock()
+ mock_file2elem.return_value = mock_cib_inst
+ mock_node_inst1 = mock.Mock()
+ mock_node_inst2 = mock.Mock()
+ mock_node_inst1.get.side_effect = ["node1", "remote"]
+ mock_node_inst2.get.side_effect = ["node2", "member"]
+ mock_cib_inst.xpath.side_effect = [[mock_node_inst1, mock_node_inst2], "data"]
+
+ res = utils.list_cluster_nodes()
+ assert res == ["node2"]
+
+ mock_run.assert_called_once_with(constants.CIB_QUERY, no_reg=False)
+ mock_env.assert_called_once_with("CIB_file", constants.CIB_RAW_FILE)
+ mock_isfile.assert_called_once_with(constants.CIB_RAW_FILE)
+ mock_file2elem.assert_called_once_with(constants.CIB_RAW_FILE)
+ mock_cib_inst.xpath.assert_has_calls([
+ mock.call(constants.XML_NODE_PATH),
+ mock.call("//primitive[@id='node1']/instance_attributes/nvpair[@name='server']")
+ ])
+
+
+@mock.patch('os.getenv')
+@mock.patch('crmsh.sh.cluster_shell')
+def test_get_property(mock_run, mock_env):
+ mock_run_inst = mock.Mock()
+ mock_run.return_value = mock_run_inst
+ mock_run_inst.get_rc_stdout_stderr_without_input.return_value = (0, "data", "")
+ mock_env.return_value = "cib.xml"
+ assert utils.get_property("no-quorum-policy") == "data"
+ mock_run_inst.get_rc_stdout_stderr_without_input.assert_called_once_with(None, "CIB_file=cib.xml sudo --preserve-env=CIB_file crm configure get_property no-quorum-policy")
+
+
+@mock.patch('logging.Logger.warning')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+@mock.patch('crmsh.utils.get_property')
+def test_set_property(mock_get, mock_run, mock_warn):
+ mock_get.return_value = "start"
+ utils.set_property("no-quorum-policy", "stop")
+ mock_run.assert_called_once_with("crm configure property no-quorum-policy=stop")
+ mock_warn.assert_called_once_with('"no-quorum-policy" in crm_config is set to stop, it was start')
+
+
+@mock.patch('crmsh.utils.get_property')
+def test_set_property_the_same(mock_get):
+ mock_get.return_value = "value1"
+ utils.set_property("no-quorum-policy", "value1")
+ mock_get.assert_called_once_with("no-quorum-policy", "crm_config")
+
+
+@mock.patch('crmsh.utils.crm_msec')
+@mock.patch('crmsh.utils.get_property')
+def test_set_property_conditional(mock_get, mock_msec):
+ mock_get.return_value = "10s"
+ mock_msec.side_effect = ["1000", "1000"]
+ utils.set_property("timeout", "10", conditional=True)
+ mock_get.assert_called_once_with("timeout", "crm_config")
+ mock_msec.assert_has_calls([mock.call("10s"), mock.call("10")])
+
+
+@mock.patch('crmsh.utils.is_dlm_configured')
+def test_check_no_quorum_policy_with_dlm_return(mock_dlm):
+ mock_dlm.return_value = False
+ utils.check_no_quorum_policy_with_dlm()
+ mock_dlm.assert_called_once_with()
+
+
+@mock.patch('logging.Logger.warning')
+@mock.patch('crmsh.utils.get_property')
+@mock.patch('crmsh.utils.is_dlm_configured')
+def test_check_no_quorum_policy_with_dlm(mock_dlm, mock_get_property, mock_warn):
+ mock_dlm.return_value = True
+ mock_get_property.return_value = "stop"
+ utils.check_no_quorum_policy_with_dlm()
+ mock_dlm.assert_called_once_with()
+ mock_get_property.assert_called_once_with("no-quorum-policy")
+ mock_warn.assert_called_once_with('The DLM cluster best practice suggests to set the cluster property "no-quorum-policy=freeze"')
+
+
+@mock.patch('crmsh.utils.is_qdevice_configured')
+@mock.patch('crmsh.utils.list_cluster_nodes')
+def test_is_2node_cluster_without_qdevice(mock_list, mock_is_qdevice):
+ mock_list.return_value = ["node1", "node2"]
+ mock_is_qdevice.return_value = False
+ res = utils.is_2node_cluster_without_qdevice()
+ assert res is True
+ mock_list.assert_called_once_with()
+ mock_is_qdevice.assert_called_once_with()
+
+
+def test_get_systemd_timeout_start_in_sec():
+ res = utils.get_systemd_timeout_start_in_sec("1min 31s")
+ assert res == 91
+
+
+@mock.patch('crmsh.utils.is_larger_than_min_version')
+@mock.patch('crmsh.cibconfig.cib_factory')
+def test_is_ocf_1_1_cib_schema_detected(mock_cib, mock_larger):
+ config.core.OCF_1_1_SUPPORT = True
+ mock_cib.get_schema = mock.Mock()
+ mock_cib.get_schema.return_value = "pacemaker-3.5"
+ mock_larger.return_value = True
+ assert utils.is_ocf_1_1_cib_schema_detected() is True
+ mock_cib.get_schema.assert_called_once_with()
+ mock_larger.assert_called_once_with("pacemaker-3.5", constants.SCHEMA_MIN_VER_SUPPORT_OCF_1_1)
+
+
+@mock.patch('logging.Logger.warning')
+@mock.patch('crmsh.utils.is_ocf_1_1_cib_schema_detected')
+def test_handle_role_for_ocf_1_1(mock_support, mock_warn):
+ mock_support.return_value = False
+ assert utils.handle_role_for_ocf_1_1("Promoted") == "Master"
+ mock_support.assert_called_once_with()
+ mock_warn.assert_called_once_with('Convert "%s" to "%s" since the current schema version is old and not upgraded yet. Please consider "%s"', "Promoted", "Master", constants.CIB_UPGRADE)
+
+
+@mock.patch('logging.Logger.info')
+@mock.patch('crmsh.utils.is_ocf_1_1_cib_schema_detected')
+def test_handle_role_for_ocf_1_1_convert_new(mock_support, mock_info):
+ config.core.OCF_1_1_SUPPORT = True
+ mock_support.return_value = True
+ assert utils.handle_role_for_ocf_1_1("Master") == "Promoted"
+ mock_support.assert_called_once_with()
+ mock_info.assert_called_once_with('Convert deprecated "%s" to "%s"', "Master", "Promoted")
+
+
+@mock.patch('crmsh.utils.is_ocf_1_1_cib_schema_detected')
+def test_handle_role_for_ocf_1_1_return(mock_support):
+ mock_support.return_value = True
+ assert utils.handle_role_for_ocf_1_1("Promoted") == "Promoted"
+ mock_support.assert_called_once_with()
+
+
+def test_handle_role_for_ocf_1_1_return_not_role():
+ assert utils.handle_role_for_ocf_1_1("test", name='other') == "test"
+
+
+def test_compatible_role():
+ assert utils.compatible_role("Slave", "Unpromoted") is True
+
+
+@mock.patch('logging.Logger.warning')
+@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+def test_fetch_cluster_node_list_from_node(mock_run, mock_warn):
+ mock_run.return_value = """
+
+ 1 node1
+ 2 node2 lost
+ 3 node3 member
+ """
+ assert utils.fetch_cluster_node_list_from_node("node1") == ["node3"]
+ mock_run.assert_called_once_with("crm_node -l", "node1")
+ mock_warn.assert_has_calls([
+ mock.call("The node '%s' has no known name and/or state information", "1"),
+ mock.call("The node '%s'(state '%s') is not a current member", "node2", "lost")
+ ])
+
+
+@mock.patch('crmsh.utils.list_cluster_nodes_except_me')
+def test_cluster_copy_file_return(mock_list_nodes):
+ mock_list_nodes.return_value = []
+    assert utils.cluster_copy_file("/file1") is True
+
+
+@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+def test_has_sudo_access(mock_run):
+ mock_run.return_value = (0, None, None)
+ assert utils.has_sudo_access() is True
+ mock_run.assert_called_once_with("sudo -S -k -n id -u")
+
+
+@mock.patch('grp.getgrgid')
+@mock.patch('os.getgroups')
+def test_in_haclient(mock_group, mock_getgrgid):
+ mock_group.return_value = [90, 100]
+ mock_getgrgid_inst1 = mock.Mock(gr_name=constants.HA_GROUP)
+ mock_getgrgid_inst2 = mock.Mock(gr_name="other")
+ mock_getgrgid.side_effect = [mock_getgrgid_inst1, mock_getgrgid_inst2]
+ assert utils.in_haclient() is True
+ mock_group.assert_called_once_with()
+
+
+@mock.patch('crmsh.utils.in_haclient')
+@mock.patch('crmsh.userdir.getuser')
+def test_check_user_access_root(mock_user, mock_in):
+ mock_user.return_value = 'root'
+ utils.check_user_access('cluster')
+ mock_in.assert_not_called()
+
+
+@mock.patch('crmsh.utils.has_sudo_access')
+@mock.patch('crmsh.utils.in_haclient')
+@mock.patch('crmsh.userdir.getuser')
+def test_check_user_access_haclient(mock_user, mock_in, mock_sudo):
+ mock_user.return_value = 'user'
+ mock_in.return_value = True
+ utils.check_user_access('ra')
+ mock_sudo.assert_not_called()
+
+
+@mock.patch('logging.Logger.error')
+@mock.patch('crmsh.utils.has_sudo_access')
+@mock.patch('crmsh.utils.in_haclient')
+@mock.patch('crmsh.userdir.getuser')
+def test_check_user_access_need_sudo(mock_user, mock_in, mock_sudo, mock_error):
+ mock_user.return_value = 'user'
+ mock_in.return_value = False
+ mock_sudo.return_value = True
+ with pytest.raises(utils.TerminateSubCommand) as err:
+ utils.check_user_access('ra')
+ mock_error.assert_called_once_with('Please run this command starting with "sudo"')
+
+
+@mock.patch('logging.Logger.error')
+@mock.patch('crmsh.utils.has_sudo_access')
+@mock.patch('crmsh.utils.in_haclient')
+@mock.patch('crmsh.userdir.getuser')
+def test_check_user_access_acl(mock_user, mock_in, mock_sudo, mock_error):
+ mock_user.return_value = 'user'
+ mock_in.return_value = False
+ mock_sudo.return_value = False
+ with pytest.raises(utils.TerminateSubCommand) as err:
+ utils.check_user_access('ra')
+ mock_error.assert_called_once_with('This command needs higher privilege.\nOption 1) Please consider to add "user" as sudoer. For example:\n sudo bash -c \'echo "user ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user\'\nOption 2) Add "user" to the haclient group. For example:\n sudo usermod -g haclient user')
+
+
+@mock.patch('logging.Logger.error')
+@mock.patch('crmsh.utils.has_sudo_access')
+@mock.patch('crmsh.utils.in_haclient')
+@mock.patch('crmsh.userdir.getuser')
+def test_check_user_access_cluster(mock_user, mock_in, mock_sudo, mock_error):
+ mock_user.return_value = 'user'
+ mock_in.return_value = False
+ mock_sudo.return_value = False
+ with pytest.raises(utils.TerminateSubCommand) as err:
+ utils.check_user_access('cluster')
+ mock_error.assert_called_once_with('Please run this command starting with "sudo".\nCurrently, this command needs to use sudo to escalate itself as root.\nPlease consider to add "user" as sudoer. For example:\n sudo bash -c \'echo "user ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user\'')
diff --git a/test/unittests/test_watchdog.py b/test/unittests/test_watchdog.py
new file mode 100644
index 0000000..957f21f
--- /dev/null
+++ b/test/unittests/test_watchdog.py
@@ -0,0 +1,311 @@
+import unittest
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import watchdog
+from crmsh import bootstrap
+from crmsh import constants
+
+
+class TestWatchdog(unittest.TestCase):
+ """
+ Unitary tests for crmsh.watchdog.Watchdog
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Global setUp.
+ """
+
+ def setUp(self):
+ """
+ Test setUp.
+ """
+ self.watchdog_inst = watchdog.Watchdog()
+ self.watchdog_join_inst = watchdog.Watchdog(remote_user="alice", peer_host="node1")
+
+ def tearDown(self):
+ """
+ Test tearDown.
+ """
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ Global tearDown.
+ """
+
+ def test_watchdog_device_name(self):
+ res = self.watchdog_inst.watchdog_device_name
+ assert res is None
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_verify_watchdog_device_ignore_error(self, mock_run):
+ mock_run.return_value = (1, None, "error")
+ res = self.watchdog_inst._verify_watchdog_device("/dev/watchdog", True)
+ self.assertEqual(res, False)
+ mock_run.assert_called_once_with("wdctl /dev/watchdog")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_verify_watchdog_device_error(self, mock_run, mock_error):
+ mock_run.return_value = (1, None, "error")
+ mock_error.side_effect = ValueError
+ with self.assertRaises(ValueError) as err:
+ self.watchdog_inst._verify_watchdog_device("/dev/watchdog")
+ mock_error.assert_called_once_with("Invalid watchdog device /dev/watchdog: error")
+ mock_run.assert_called_once_with("wdctl /dev/watchdog")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_verify_watchdog_device(self, mock_run):
+ mock_run.return_value = (0, None, None)
+ res = self.watchdog_inst._verify_watchdog_device("/dev/watchdog")
+ self.assertEqual(res, True)
+
+ @mock.patch('crmsh.watchdog.invoke')
+ def test_load_watchdog_driver(self, mock_invoke):
+ self.watchdog_inst._load_watchdog_driver("softdog")
+ mock_invoke.assert_has_calls([
+ mock.call("echo softdog > /etc/modules-load.d/watchdog.conf"),
+ mock.call("systemctl restart systemd-modules-load")
+ ])
+
+ @mock.patch('crmsh.utils.parse_sysconfig')
+ def test_get_watchdog_device_from_sbd_config(self, mock_parse):
+ mock_parse_inst = mock.Mock()
+ mock_parse.return_value = mock_parse_inst
+ mock_parse_inst.get.return_value = "/dev/watchdog"
+ res = self.watchdog_inst._get_watchdog_device_from_sbd_config()
+ self.assertEqual(res, "/dev/watchdog")
+ mock_parse.assert_called_once_with(bootstrap.SYSCONFIG_SBD)
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_driver_is_loaded(self, mock_run):
+ output = """
+button 24576 0
+softdog 16384 2
+btrfs 1474560 1
+ """
+ mock_run.return_value = (0, output, None)
+ res = self.watchdog_inst._driver_is_loaded("softdog")
+ assert res is not None
+ mock_run.assert_called_once_with("lsmod")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_set_watchdog_info_error(self, mock_run, mock_error):
+ mock_run.return_value = (1, None, "error")
+ mock_error.side_effect = ValueError
+ with self.assertRaises(ValueError):
+ self.watchdog_inst._set_watchdog_info()
+ mock_run.assert_called_once_with(watchdog.Watchdog.QUERY_CMD)
+ mock_error.assert_called_once_with("Failed to run {}: error".format(watchdog.Watchdog.QUERY_CMD))
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_set_watchdog_info(self, mock_run):
+ output = """
+Discovered 3 watchdog devices:
+
+[1] /dev/watchdog
+Identity: Software Watchdog
+Driver: softdog
+CAUTION: Not recommended for use with sbd.
+
+[2] /dev/watchdog0
+Identity: Software Watchdog
+Driver: softdog
+CAUTION: Not recommended for use with sbd.
+
+[3] /dev/watchdog1
+Identity: iTCO_wdt
+Driver: iTCO_wdt
+ """
+ mock_run.return_value = (0, output, None)
+ self.watchdog_inst._set_watchdog_info()
+ self.assertEqual(self.watchdog_inst._watchdog_info_dict, {'/dev/watchdog': 'softdog', '/dev/watchdog0': 'softdog', '/dev/watchdog1': 'iTCO_wdt'})
+
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ def test_get_device_through_driver_none(self, mock_verify):
+ self.watchdog_inst._watchdog_info_dict = {'/dev/watchdog': 'softdog', '/dev/watchdog0': 'softdog', '/dev/watchdog1': 'iTCO_wdt'}
+ mock_verify.return_value = False
+ res = self.watchdog_inst._get_device_through_driver("iTCO_wdt")
+ self.assertEqual(res, None)
+ mock_verify.assert_called_once_with("/dev/watchdog1")
+
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ def test_get_device_through_driver(self, mock_verify):
+ self.watchdog_inst._watchdog_info_dict = {'/dev/watchdog': 'softdog', '/dev/watchdog0': 'softdog', '/dev/watchdog1': 'iTCO_wdt'}
+ mock_verify.return_value = True
+ res = self.watchdog_inst._get_device_through_driver("iTCO_wdt")
+ self.assertEqual(res, "/dev/watchdog1")
+ mock_verify.assert_called_once_with("/dev/watchdog1")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_driver_through_device_remotely_error(self, mock_run, mock_error):
+ mock_run.return_value = (1, None, "error")
+ self.watchdog_join_inst._get_driver_through_device_remotely("test")
+ mock_run.assert_called_once_with("ssh {} alice@node1 sudo sbd query-watchdog".format(constants.SSH_OPTION))
+ mock_error.assert_called_once_with("Failed to run sudo sbd query-watchdog remotely: error")
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_driver_through_device_remotely_none(self, mock_run):
+ mock_run.return_value = (0, "data", None)
+ res = self.watchdog_join_inst._get_driver_through_device_remotely("/dev/watchdog")
+ self.assertEqual(res, None)
+ mock_run.assert_called_once_with("ssh {} alice@node1 sudo sbd query-watchdog".format(constants.SSH_OPTION))
+
+ @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+ def test_get_driver_through_device_remotely(self, mock_run):
+ output = """
+Discovered 3 watchdog devices:
+
+[1] /dev/watchdog
+Identity: Software Watchdog
+Driver: softdog
+CAUTION: Not recommended for use with sbd.
+
+[2] /dev/watchdog0
+Identity: Software Watchdog
+Driver: softdog
+CAUTION: Not recommended for use with sbd.
+
+[3] /dev/watchdog1
+Identity: iTCO_wdt
+Driver: iTCO_wdt
+ """
+ mock_run.return_value = (0, output, None)
+ res = self.watchdog_join_inst._get_driver_through_device_remotely("/dev/watchdog")
+ self.assertEqual(res, "softdog")
+ mock_run.assert_called_once_with("ssh {} alice@node1 sudo sbd query-watchdog".format(constants.SSH_OPTION))
+
+ def test_get_first_unused_device_none(self):
+ res = self.watchdog_inst._get_first_unused_device()
+ self.assertEqual(res, None)
+
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ def test_get_first_unused_device(self, mock_verify):
+ mock_verify.return_value = True
+ self.watchdog_inst._watchdog_info_dict = {'/dev/watchdog': 'softdog', '/dev/watchdog0': 'softdog', '/dev/watchdog1': 'iTCO_wdt'}
+ res = self.watchdog_inst._get_first_unused_device()
+ self.assertEqual(res, "/dev/watchdog")
+ mock_verify.assert_called_once_with("/dev/watchdog", ignore_error=True)
+
+ @mock.patch('crmsh.watchdog.Watchdog._get_first_unused_device')
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ @mock.patch('crmsh.watchdog.Watchdog._get_watchdog_device_from_sbd_config')
+ def test_set_input_from_config(self, mock_from_config, mock_verify, mock_first):
+ mock_from_config.return_value = "/dev/watchdog"
+ mock_verify.return_value = True
+ self.watchdog_inst._set_input()
+ mock_first.assert_not_called()
+ mock_from_config.assert_called_once_with()
+
+ @mock.patch('crmsh.watchdog.Watchdog._get_first_unused_device')
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ @mock.patch('crmsh.watchdog.Watchdog._get_watchdog_device_from_sbd_config')
+ def test_set_input(self, mock_from_config, mock_verify, mock_first):
+ mock_from_config.return_value = None
+ mock_first.return_value = None
+ self.watchdog_inst._set_input()
+ self.assertEqual(self.watchdog_inst._input, "softdog")
+ mock_from_config.assert_called_once_with()
+ mock_verify.assert_not_called()
+ mock_first.assert_called_once_with()
+
+ def test_valid_device_false(self):
+ res = self.watchdog_inst._valid_device("test")
+ self.assertEqual(res, False)
+
+ @mock.patch('crmsh.watchdog.Watchdog._verify_watchdog_device')
+ def test_valid_device(self, mock_verify):
+ mock_verify.return_value = True
+ self.watchdog_inst._watchdog_info_dict = {'/dev/watchdog': 'softdog', '/dev/watchdog0': 'softdog', '/dev/watchdog1': 'iTCO_wdt'}
+ res = self.watchdog_inst._valid_device("/dev/watchdog")
+ self.assertEqual(res, True)
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.watchdog.Watchdog._get_watchdog_device_from_sbd_config')
+ @mock.patch('crmsh.watchdog.Watchdog._set_watchdog_info')
+ def test_join_watchdog_error(self, mock_set_info, mock_from_config, mock_error):
+ mock_from_config.return_value = None
+ mock_error.side_effect = SystemExit
+ with self.assertRaises(SystemExit):
+ self.watchdog_join_inst.join_watchdog()
+ mock_set_info.assert_called_once_with()
+ mock_from_config.assert_called_once_with()
+ mock_error.assert_called_once_with("Failed to get watchdog device from {}".format(bootstrap.SYSCONFIG_SBD))
+
+ @mock.patch('crmsh.watchdog.Watchdog._load_watchdog_driver')
+ @mock.patch('crmsh.watchdog.Watchdog._get_driver_through_device_remotely')
+ @mock.patch('crmsh.watchdog.Watchdog._valid_device')
+ @mock.patch('crmsh.watchdog.Watchdog._get_watchdog_device_from_sbd_config')
+ @mock.patch('crmsh.watchdog.Watchdog._set_watchdog_info')
+ def test_join_watchdog(self, mock_set_info, mock_from_config, mock_valid, mock_get_driver_remotely, mock_load):
+ mock_from_config.return_value = "/dev/watchdog"
+ mock_valid.return_value = False
+ mock_get_driver_remotely.return_value = "softdog"
+
+ self.watchdog_join_inst.join_watchdog()
+
+ mock_set_info.assert_called_once_with()
+ mock_from_config.assert_called_once_with()
+ mock_valid.assert_called_once_with("/dev/watchdog")
+ mock_get_driver_remotely.assert_called_once_with("/dev/watchdog")
+ mock_load.assert_called_once_with("softdog")
+
+ @mock.patch('crmsh.watchdog.invokerc')
+ @mock.patch('crmsh.watchdog.Watchdog._valid_device')
+ @mock.patch('crmsh.watchdog.Watchdog._set_input')
+ @mock.patch('crmsh.watchdog.Watchdog._set_watchdog_info')
+ def test_init_watchdog_valid(self, mock_set_info, mock_set_input, mock_valid, mock_invokerc):
+ mock_valid.return_value = True
+ self.watchdog_inst._input = "/dev/watchdog"
+ self.watchdog_inst.init_watchdog()
+ mock_invokerc.assert_not_called()
+ mock_valid.assert_called_once_with("/dev/watchdog")
+
+ @mock.patch('crmsh.utils.fatal')
+ @mock.patch('crmsh.watchdog.invokerc')
+ @mock.patch('crmsh.watchdog.Watchdog._valid_device')
+ @mock.patch('crmsh.watchdog.Watchdog._set_input')
+ @mock.patch('crmsh.watchdog.Watchdog._set_watchdog_info')
+ def test_init_watchdog_error(self, mock_set_info, mock_set_input, mock_valid, mock_invokerc, mock_error):
+ mock_valid.return_value = False
+ mock_invokerc.return_value = False
+ self.watchdog_inst._input = "test"
+ mock_error.side_effect = SystemExit
+
+ with self.assertRaises(SystemExit):
+ self.watchdog_inst.init_watchdog()
+
+ mock_valid.assert_called_once_with("test")
+ mock_invokerc.assert_called_once_with("modinfo test")
+ mock_error.assert_called_once_with("Should provide valid watchdog device or driver name by -w option")
+
+ @mock.patch('crmsh.watchdog.Watchdog._get_device_through_driver')
+ @mock.patch('crmsh.watchdog.Watchdog._load_watchdog_driver')
+ @mock.patch('crmsh.watchdog.Watchdog._driver_is_loaded')
+ @mock.patch('crmsh.watchdog.invokerc')
+ @mock.patch('crmsh.watchdog.Watchdog._valid_device')
+ @mock.patch('crmsh.watchdog.Watchdog._set_input')
+ @mock.patch('crmsh.watchdog.Watchdog._set_watchdog_info')
+ def test_init_watchdog(self, mock_set_info, mock_set_input, mock_valid, mock_invokerc, mock_is_loaded, mock_load, mock_get_device):
+ mock_valid.return_value = False
+ self.watchdog_inst._input = "softdog"
+ mock_invokerc.return_value = True
+ mock_is_loaded.return_value = False
+ mock_get_device.return_value = "/dev/watchdog"
+
+ self.watchdog_inst.init_watchdog()
+
+ mock_valid.assert_called_once_with("softdog")
+ mock_invokerc.assert_called_once_with("modinfo softdog")
+ mock_is_loaded.assert_called_once_with("softdog")
+ mock_load.assert_called_once_with("softdog")
+ mock_set_info.assert_has_calls([mock.call(), mock.call()])
+ mock_get_device.assert_called_once_with("softdog")
diff --git a/test/unittests/test_xmlutil.py b/test/unittests/test_xmlutil.py
new file mode 100644
index 0000000..48393bf
--- /dev/null
+++ b/test/unittests/test_xmlutil.py
@@ -0,0 +1,61 @@
+import unittest
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+from crmsh import xmlutil, constants
+
+
+class TestCrmMonXmlParser(unittest.TestCase):
+ """
+ Unitary tests for crmsh.xmlutil.CrmMonXmlParser
+ """
+ @mock.patch('crmsh.sh.cluster_shell')
+ def setUp(self, mock_cluster_shell):
+ """
+ Test setUp.
+ """
+ data = '''
+<data>
+ <nodes>
+ <node name="tbw-1" id="1084783148" online="true" standby="true" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="3" type="member"/>
+ <node name="tbw-2" id="1084783312" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
+ </nodes>
+ <resources>
+ <resource id="ocfs2-dlm" resource_agent="ocf::pacemaker:controld" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="tbw-2" id="1084783312" cached="true"/>
+ </resource>
+ <resource id="ocfs2-clusterfs" resource_agent="ocf::heartbeat:Filesystem" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="tbw-2" id="1084783312" cached="true"/>
+ </resource>
+ </resources>
+</data>
+ '''
+ mock_cluster_shell().get_rc_stdout_stderr_without_input.return_value = (0, data, '')
+ self.parser_inst = xmlutil.CrmMonXmlParser()
+
+ def test_is_node_online(self):
+ assert self.parser_inst.is_node_online("tbw-1") is True
+ assert self.parser_inst.is_node_online("tbw-2") is False
+
+ def test_get_node_list(self):
+ assert self.parser_inst.get_node_list("standby") == ['tbw-1']
+ assert self.parser_inst.get_node_list("online") == ['tbw-2']
+
+ def test_is_resource_configured(self):
+ assert self.parser_inst.is_resource_configured("test") is False
+ assert self.parser_inst.is_resource_configured("ocf::heartbeat:Filesystem") is True
+
+ def test_is_any_resource_running(self):
+ assert self.parser_inst.is_any_resource_running() is True
+
+ def test_is_resource_started(self):
+ assert self.parser_inst.is_resource_started("test") is False
+ assert self.parser_inst.is_resource_started("ocfs2-clusterfs") is True
+ assert self.parser_inst.is_resource_started("ocf::pacemaker:controld") is True
+
+ def test_get_resource_id_list_via_type(self):
+ assert self.parser_inst.get_resource_id_list_via_type("test") == []
+ assert self.parser_inst.get_resource_id_list_via_type("ocf::pacemaker:controld")[0] == "ocfs2-dlm"
diff --git a/test/update-expected-output.sh b/test/update-expected-output.sh
new file mode 100755
index 0000000..496b73d
--- /dev/null
+++ b/test/update-expected-output.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
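+# Rewrite the "---"/"+++" header paths in the regression test .diff output
+# so they point at the repository tree instead of the installed
+# /usr/share/crmsh/tests location.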
+crmtestout="$1"
+
+[ -d "$crmtestout" ] || { echo "usage: $0 <test-output-dir>"; exit 1; }
+
+for f in "$crmtestout"/*.diff; do
+	fil=$(grep -- --- "$f" | awk '{print $2}' | sed 's/\/usr\/share\/crmsh\/tests/\/test/g')
+	awk "NR==1{\$2=\"a$fil\"}1" < "$f" | awk "NR==2{\$2=\"b$fil\"}1"
+done
diff --git a/test_container/Dockerfile b/test_container/Dockerfile
new file mode 100644
index 0000000..c099d31
--- /dev/null
+++ b/test_container/Dockerfile
@@ -0,0 +1,28 @@
+FROM opensuse/leap:15.5
+LABEL maintainer="Xin Liang <XLiang@suse.com>"
+
+CMD ["/usr/lib/systemd/systemd", "--system"]
+
+RUN zypper refresh && \
+ zypper -n install systemd \
+ make autoconf automake vim which libxslt-tools mailx iproute2 iputils bzip2 openssh tar file glibc-locale-base firewalld libopenssl1_1 dos2unix iptables \
+ python3 python3-pip python3-lxml python3-python-dateutil python3-setuptools python3-PyYAML python3-curses python3-behave \
+ csync2 libglue-devel corosync corosync-qdevice pacemaker booth corosync-qnetd
+RUN zypper --non-interactive up zypper && \
+ zypper ar -f -G https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/SLE_15_SP4 repo_nhf && \
+ zypper --non-interactive refresh && \
+ zypper --non-interactive up --allow-vendor-change -y resource-agents libqb100 pacemaker
+
+RUN ssh-keygen -t rsa -f /root/.ssh/id_rsa -N '' && \
+ cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys && \
+ chmod 0600 /root/.ssh/authorized_keys
+
+RUN python3 -m pip install coverage
+
+RUN mkdir -p /var/log/crmsh
+
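+# Install the socket-activated test agent: behave-agent.socket listens on
+# TCP port 1122 and hands each connection to behave_agent.py.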
+COPY behave_agent.py /opt
+COPY behave-agent.socket /etc/systemd/system
+COPY behave-agent@.service /etc/systemd/system
+RUN systemctl enable behave-agent.socket
diff --git a/test_container/behave-agent.socket b/test_container/behave-agent.socket
new file mode 100644
index 0000000..1212d30
--- /dev/null
+++ b/test_container/behave-agent.socket
@@ -0,0 +1,9 @@
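+# Accept=yes makes systemd spawn one behave-agent@.service instance per
+# accepted connection, passing the connection socket as stdin/stdout.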
+[Unit]
+Description=behave test agent
+
+[Socket]
+ListenStream=1122
+Accept=yes
+
+[Install]
+WantedBy=sockets.target
diff --git a/test_container/behave-agent@.service b/test_container/behave-agent@.service
new file mode 100644
index 0000000..eadc420
--- /dev/null
+++ b/test_container/behave-agent@.service
@@ -0,0 +1,9 @@
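+# Template unit started by behave-agent.socket for each accepted connection.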
+[Unit]
+Description=behave test agent
+CollectMode=inactive-or-failed
+
+[Service]
+ExecStart=/opt/behave_agent.py
+StandardInput=socket
+StandardOutput=socket
+StandardError=journal
diff --git a/test_container/behave_agent.py b/test_container/behave_agent.py
new file mode 100755
index 0000000..49d32d4
--- /dev/null
+++ b/test_container/behave_agent.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python3
+import io
+import os
+import pwd
+import socket
+import struct
+import subprocess
+
+
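+# Wire protocol shared by call() and serve(): a stream of length-prefixed
+# messages, each one a '!ii' struct header (message type, payload length)
+# followed by the raw payload bytes.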
+MSG_EOF = 0
+MSG_USER = 1
+MSG_CMD = 2
+MSG_OUT = 4
+MSG_ERR = 5
+MSG_RC = 6
+
+
+class Message:
+ @staticmethod
+ def write(output, type: int, data: bytes):
+ output.write(struct.pack('!ii', type, len(data)))
+ output.write(data)
+
+ @staticmethod
+ def read(input):
+ buf = input.read(8)
+ type, length = struct.unpack('!ii', buf)
+ if length > 0:
+ buf = input.read(length)
+ else:
+ buf = b''
+ return type, buf
+
+
+class SocketIO(io.RawIOBase):
+ def __init__(self, s: socket.socket):
+ self._socket = s
+
+ def readable(self) -> bool:
+ return True
+
+ def writable(self) -> bool:
+ return True
+
+ def read(self, __size: int = -1) -> bytes:
+ return self._socket.recv(__size)
+
+ def readinto(self, __buffer) -> int:
+ return self._socket.recv_into(__buffer)
+
+ def readall(self) -> bytes:
+ raise NotImplementedError
+
+ def write(self, __b) -> int:
+ return self._socket.send(__b)
+
+
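+# Client side: send the target user and command line, half-close the
+# socket, then collect stdout/stderr chunks and the exit code from the
+# reply stream.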
+def call(host: str, port: int, cmdline: str):
+ family, type, proto, _, sockaddr = socket.getaddrinfo(host, port, type=socket.SOCK_STREAM)[0]
+ with socket.socket(family, type, proto) as s:
+ s.connect(sockaddr)
+ sout = io.BufferedWriter(SocketIO(s), 4096)
+ Message.write(sout, MSG_USER, _getuser().encode('utf-8'))
+ Message.write(sout, MSG_CMD, cmdline.encode('utf-8'))
+ Message.write(sout, MSG_EOF, b'')
+ sout.flush()
+ s.shutdown(socket.SHUT_WR)
+ rc = None
+ stdout = []
+ stderr = []
+ sin = io.BufferedReader(SocketIO(s), 4096)
+ while True:
+ type, buf = Message.read(sin)
+ if type == MSG_OUT:
+ stdout.append(buf)
+ elif type == MSG_ERR:
+ stderr.append(buf)
+ elif type == MSG_RC:
+ rc, = struct.unpack('!i', buf)
+ elif type == MSG_EOF:
+ s.shutdown(socket.SHUT_RD)
+ assert rc is not None
+ return rc, b''.join(stdout), b''.join(stderr)
+ else:
+ raise ValueError(f"Unknown message type: {type}")
+
+
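+# Server side, run as root by the socket-activated systemd unit: read the
+# requested user and command, execute the command through /bin/sh (via su
+# for non-root users), then stream back rc, stdout and stderr.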
+def serve(stdin, stdout, stderr):
+ assert os.geteuid() == 0
+ user = None
+ cmd = None
+ sin = io.BufferedReader(stdin)
+ while True:
+ type, buf = Message.read(sin)
+ if type == MSG_USER:
+ user = buf.decode('utf-8')
+ elif type == MSG_CMD:
+ cmd = buf.decode('utf-8')
+ elif type == MSG_EOF:
+ assert user is not None
+ assert cmd is not None
+ break
+ else:
+ raise ValueError(f"Unknown message type: {type}")
+ if user == 'root':
+ args = ['/bin/sh']
+ else:
+ args = ['/bin/su', '-', user, '-c', '/bin/sh']
+ result = subprocess.run(
+ args,
+ input=cmd.encode('utf-8'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ sout = io.BufferedWriter(stdout)
+ Message.write(sout, MSG_RC, struct.pack('!i', result.returncode))
+ Message.write(sout, MSG_OUT, result.stdout)
+ Message.write(sout, MSG_ERR, result.stderr)
+ Message.write(sout, MSG_EOF, b'')
+ stdout.flush()
+
+
+def _getuser():
+ return pwd.getpwuid(os.geteuid()).pw_name
+
+
+if __name__ == '__main__':
+ with open(0, 'rb') as stdin, \
+ open(1, 'wb') as stdout, \
+ open(2, 'wb') as stderr:
+ serve(stdin, stdout, stderr)
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..e13146e
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,31 @@
+# tox configuration for the crmsh unit tests; lives alongside setup.py
+[tox]
+envlist = py36, py38, py310
+skip_missing_interpreters = true
+
+[base]
+changedir = test/unittests
+deps =
+ pytest
+ pytest-cov
+commands = py.test -vv --cov=crmsh --cov-config .coveragerc --cov-report term --cov-report html {posargs}
+
+[testenv]
+changedir = {[base]changedir}
+deps = {[base]deps}
+commands = {[base]commands}
+
+[testenv:py36]
+changedir = {[base]changedir}
+deps = {[base]deps}
+commands = {[base]commands}
+
+[testenv:py38]
+changedir = {[base]changedir}
+deps = {[base]deps}
+commands = {[base]commands}
+
+[testenv:py310]
+changedir = {[base]changedir}
+deps = {[base]deps}
+commands = {[base]commands}
diff --git a/update-data-manifest.sh b/update-data-manifest.sh
new file mode 100755
index 0000000..e4de0c9
--- /dev/null
+++ b/update-data-manifest.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Generate the data-manifest file which lists
+# all files which should be installed to /usr/share/crmsh
+target=data-manifest
+[ -f $target ] && (printf "Removing $target..."; rm $target)
+printf "Generating $target..."
+cat <<EOF | sort -df > $target
+version
+$(git ls-files scripts templates utils test)
+EOF
+[ ! -f $target ] && printf "FAILED\n"
+[ -f $target ] && printf "OK\n"
diff --git a/utils/crm_clean.py b/utils/crm_clean.py
new file mode 100755
index 0000000..5bb79f0
--- /dev/null
+++ b/utils/crm_clean.py
@@ -0,0 +1,46 @@
+#!/usr/bin/python3
+import os
+import sys
+import shutil
+errors = []
+mydir = os.path.dirname(os.path.abspath(sys.modules[__name__].__file__))
+
+
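+# Safety net: only paths inside this script's own directory may be removed;
+# anything relative, directly under /, or below /var or /usr is rejected.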
+def bad(path):
+ return ((not os.path.isabs(path)) or os.path.dirname(path) == '/' or
+ path.startswith('/var') or path.startswith('/usr') or
+ (not path.startswith(mydir)))
+
+for f in sys.argv[1:]:
+ if bad(f):
+ errors.append("cannot remove %s from %s" % (f, mydir))
+ continue
+ try:
+ if os.path.isfile(f):
+ os.remove(f)
+ elif os.path.isdir(f):
+ if os.path.isfile(os.path.join(f, 'crm_script.debug')):
+ print(open(os.path.join(f, 'crm_script.debug')).read())
+
+ # to check whether this clean request came from health
+ # if it does, delete all except health-report
+            del_flag = False
+            for x in os.listdir(f):
+                if x.startswith("health-report"):
+                    del_flag = True
+                    break
+
+            if del_flag:
+                for x in os.listdir(f):
+                    if x.startswith("health-report"):
+                        continue
+                    # os.listdir() yields bare names; join with the parent
+                    # directory so the checks and removals hit real paths
+                    path = os.path.join(f, x)
+                    if os.path.isfile(path):
+                        os.remove(path)
+                    elif os.path.isdir(path):
+                        shutil.rmtree(path)
+ else:
+ shutil.rmtree(f)
+ except OSError as e:
+        errors.append(str(e))
+if errors:
+ print('\n'.join(errors), file=sys.stderr)
+ sys.exit(1)
diff --git a/utils/crm_init.py b/utils/crm_init.py
new file mode 100644
index 0000000..226ce83
--- /dev/null
+++ b/utils/crm_init.py
@@ -0,0 +1,251 @@
+import os
+import pwd
+import re
+import platform
+import socket
+import crm_script
+
+PACKAGES = ['booth', 'cluster-glue', 'corosync', 'crmsh', 'csync2', 'drbd',
+ 'fence-agents', 'gfs2', 'gfs2-utils', 'hawk', 'ocfs2',
+ 'ocfs2-tools', 'pacemaker', 'pacemaker-mgmt',
+ 'resource-agents', 'sbd']
+SERVICES = ['sshd', 'ntp', 'corosync', 'pacemaker', 'hawk', 'SuSEfirewall2_init']
+SSH_KEY = os.path.expanduser('~/.ssh/id_rsa')
+CSYNC2_KEY = '/etc/csync2/key_hagroup'
+CSYNC2_CFG = '/etc/csync2/csync2.cfg'
+COROSYNC_CONF = '/etc/corosync/corosync.conf'
+SYSCONFIG_SBD = '/etc/sysconfig/sbd'
+SYSCONFIG_FW = '/etc/sysconfig/SuSEfirewall2'
+SYSCONFIG_FW_CLUSTER = '/etc/sysconfig/SuSEfirewall2.d/services/cluster'
+
+
+def rpm_info():
+ 'check installed packages'
+ return crm_script.rpmcheck(PACKAGES)
+
+
+def service_info(service):
+ "Returns information about a given service"
+ active, enabled = 'unknown', 'unknown'
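+    # systemctl is-enabled/is-active exit non-zero for disabled or inactive
+    # units while still printing the state on stdout, so rc values 0, 1 and
+    # 3 are all accepted as valid answers.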
+ rc, out, err = crm_script.call(["/usr/bin/systemctl", "is-enabled", "%s.service" % (service)])
+ if rc in (0, 1, 3) and out:
+ enabled = out.strip()
+ else:
+ return {'name': service, 'error': err.strip()}
+ rc, out, err = crm_script.call(["/usr/bin/systemctl", "is-active", "%s.service" % (service)])
+ if rc in (0, 1, 3) and out:
+ active = out.strip()
+ else:
+ return {'name': service, 'error': err.strip()}
+ return {'name': service, 'active': active, 'enabled': enabled}
+
+
+def services_info():
+ 'check enabled/active services'
+ return [service_info(service) for service in SERVICES]
+
+
+def get_user():
+    return pwd.getpwuid(os.getuid()).pw_name
+
+
+def sys_info():
+ 'system information'
+ system, node, release, version, machine, processor = platform.uname()
+ hostname = platform.node().split('.')[0]
+ return {'system': system,
+ 'node': node,
+ 'release': release,
+ 'version': version,
+ 'machine': machine,
+ 'processor': processor,
+ 'user': get_user(),
+ 'hostname': hostname,
+ 'fqdn': socket.getfqdn()}
+
+
+def net_info():
+ ret = {}
+ interfaces = []
+ ret['interfaces'] = interfaces
+ hostname = platform.node().split('.')[0]
+ try:
+ ip = socket.gethostbyname(hostname)
+ ret['hostname'] = {'name': hostname, 'ip': ip}
+ except Exception as e:
+ ret['hostname'] = {'error': str(e)}
+ return ret
+
+
+def files_info():
+ def check(fn):
+ if os.path.isfile(os.path.expanduser(fn)):
+ return os.path.expanduser(fn)
+ return ''
+ return {'ssh_key': check(SSH_KEY),
+ 'csync2_key': check(CSYNC2_KEY),
+ 'csync2_cfg': check(CSYNC2_CFG),
+ 'corosync_conf': check(COROSYNC_CONF),
+ 'sysconfig_sbd': check(SYSCONFIG_SBD),
+ 'sysconfig_fw': check(SYSCONFIG_FW),
+ 'sysconfig_fw_cluster': check(SYSCONFIG_FW_CLUSTER),
+ }
+
+
+def logrotate_info():
+ rc, _, _ = crm_script.call(
+ 'grep -r corosync.conf /etc/logrotate.d',
+ shell=True)
+ return {'corosync.conf': rc == 0}
+
+
+def disk_info():
+ rc, out, err = crm_script.call(['df'], shell=False)
+ if rc == 0:
+ disk_use = []
+ for line in out.split('\n')[1:]:
+ line = line.strip()
+ if line:
+ data = line.split()
+ if len(data) >= 6:
+ disk_use.append((data[5], int(data[4][:-1])))
+ return disk_use
+ return []
+
+
+def info():
+ return {'rpm': rpm_info(),
+ 'services': services_info(),
+ 'system': sys_info(),
+ 'net': net_info(),
+ 'files': files_info(),
+ 'logrotate': logrotate_info(),
+ 'disk': disk_info()}
+
+
+def verify(data):
+ """
+ Given output from info(), verifies
+ as much as possible before init/add.
+ """
+ def check_diskspace():
+ for host, info in data.items():
+ for mount, percent in info['disk']:
+ interesting = (mount == '/' or
+ mount.startswith('/var/log') or
+ mount.startswith('/tmp'))
+ if interesting and percent > 90:
+ crm_script.exit_fail("Not enough space on %s:%s" % (host, mount))
+
+ def check_services():
+ for host, info in data.items():
+ for svc in info['services']:
+ if svc['name'] == 'pacemaker' and svc['active'] == 'active':
+ crm_script.exit_fail("%s already running pacemaker" % (host))
+ if svc['name'] == 'corosync' and svc['active'] == 'active':
+ crm_script.exit_fail("%s already running corosync" % (host))
+
+ def verify_host(host, info):
+ if host != info['system']['hostname']:
+ crm_script.exit_fail("Hostname mismatch: %s is not %s" %
+ (host, info['system']['hostname']))
+
+    def compare_system(systems):
+        # materialize the generator argument so check() can iterate it
+        # more than once
+        systems = list(systems)
+
+        def check(value, msg):
+            vals = set(system[value] for host, system in systems)
+ if len(vals) > 1:
+ info = ', '.join('%s: %s' % (h, system[value]) for h, system in systems)
+ crm_script.exit_fail("%s: %s" % (msg, info))
+
+ check('machine', 'Architecture differs')
+ #check('release', 'Kernel release differs')
+ #check('distname', 'Distribution differs')
+ #check('distver', 'Distribution version differs')
+ #check('version', 'Kernel version differs')
+
+ for host, info in data.items():
+ verify_host(host, info)
+
+ compare_system((h, info['system']) for h, info in data.items())
+
+ check_diskspace()
+ check_services()
+
+
+# common functions to initialize a cluster node
+
+
+def is_service_enabled(name):
+ info = service_info(name)
+ if info.get('name') == name and info.get('enabled') == 'enabled':
+ return True
+ return False
+
+
+def is_service_active(name):
+ info = service_info(name)
+ if info.get('name') == name and info.get('active') == 'active':
+ return True
+ return False
+
+
+def install_packages(packages):
+ for pkg in packages:
+ try:
+ crm_script.package(pkg, 'latest')
+ except Exception as e:
+ crm_script.exit_fail("Failed to install %s: %s" % (pkg, e))
+
+
+def configure_firewall():
+ _SUSE_FW_TEMPLATE = """## Name: HAE cluster ports
+## Description: opens ports for HAE cluster services
+TCP="%(tcp)s"
+UDP="%(udp)s"
+"""
+    corosync_mcastport = crm_script.param('mcastport')
+    if not corosync_mcastport:
+        rc, out, err = crm_script.call(['crm', 'corosync', 'get', 'totem.interface.mcastport'])
+        if rc == 0:
+            corosync_mcastport = out.strip()
+    if not corosync_mcastport:
+        crm_script.exit_fail("Could not determine the corosync mcastport")
+ FW = '/etc/sysconfig/SuSEfirewall2'
+ FW_CLUSTER = '/etc/sysconfig/SuSEfirewall2.d/services/cluster'
+
+ tcp_ports = '30865 5560 7630 21064'
+ udp_ports = '%s %s' % (corosync_mcastport, int(corosync_mcastport) - 1)
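+    # corosync receives on mcastport and sends from mcastport - 1, so both
+    # UDP ports must be open; the TCP list appears to cover csync2 (30865),
+    # the mgmt daemon (5560), hawk (7630) and DLM (21064).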
+
+ if is_service_enabled('SuSEfirewall2'):
+ if os.path.isfile(FW_CLUSTER):
+ tmpl = open(FW_CLUSTER).read()
+ tmpl = re.sub(r'^TCP="(.*)"', 'TCP="%s"' % (tcp_ports), tmpl, flags=re.M)
+ tmpl = re.sub(r'^UDP="(.*)"', 'UDP="%s"' % (udp_ports), tmpl, flags=re.M)
+ with open(FW_CLUSTER, 'w') as f:
+ f.write(tmpl)
+ elif os.path.isdir(os.path.dirname(FW_CLUSTER)):
+ with open(FW_CLUSTER, 'w') as fwc:
+ fwc.write(_SUSE_FW_TEMPLATE % {'tcp': tcp_ports,
+ 'udp': udp_ports})
+ else:
+ # neither the cluster file nor the services
+ # directory exists
+ crm_script.exit_fail("SUSE firewall is configured but %s does not exist" %
+ os.path.dirname(FW_CLUSTER))
+
+ # add cluster to FW_CONFIGURATIONS_EXT
+ if os.path.isfile(FW):
+ txt = open(FW).read()
+ m = re.search(r'^FW_CONFIGURATIONS_EXT="(.*)"', txt, re.M)
+ if m:
+ services = m.group(1).split()
+ if 'cluster' not in services:
+ services.append('cluster')
+ txt = re.sub(r'^FW_CONFIGURATIONS_EXT="(.*)"',
+ r'FW_CONFIGURATIONS_EXT="%s"' % (' '.join(services)),
+ txt,
+ flags=re.M)
+ else:
+ txt += '\nFW_CONFIGURATIONS_EXT="cluster"'
+ with open(FW, 'w') as fw:
+ fw.write(txt)
+ if is_service_active('SuSEfirewall2'):
+ crm_script.service('SuSEfirewall2', 'restart')
+
+ # TODO: other platforms
diff --git a/utils/crm_pkg.py b/utils/crm_pkg.py
new file mode 100755
index 0000000..b775004
--- /dev/null
+++ b/utils/crm_pkg.py
@@ -0,0 +1,342 @@
+#!/usr/bin/python3
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import os
+import sys
+import subprocess
+import json
+
+
+DRY_RUN = False
+
+
+def get_platform():
+ return os.uname()[0]
+
+
+def fail(msg):
+ print(msg, file=sys.stderr)
+ sys.exit(1)
+
+
+def run(cmd):
+ proc = subprocess.Popen(cmd,
+ shell=False,
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out, err = proc.communicate(None)
+ return proc.returncode, out, err
+
+
+def is_program(prog):
+ """Is this program available?"""
+ for p in os.getenv("PATH").split(os.pathsep):
+ filename = os.path.join(p, prog)
+ if os.path.isfile(filename) and os.access(filename, os.X_OK):
+ return filename
+ return None
+
+
+class PackageManager(object):
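+    # Backends implement present/latest/absent, each returning a
+    # (rc, stdout, stderr, changed) tuple; dispatch() maps the requested
+    # package state to the matching operation.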
+ def dispatch(self, name, state):
+ if state in ('installed', 'present'):
+ return self.present(name)
+ elif state in ('absent', 'removed'):
+ return self.absent(name)
+ elif state == 'latest':
+ return self.latest(name)
+ fail(msg="Unknown state: " + state)
+
+ def present(self, name):
+ raise NotImplementedError
+
+ def latest(self, name):
+ raise NotImplementedError
+
+ def absent(self, name):
+ raise NotImplementedError
+
+
+class Zypper(PackageManager):
+ def __init__(self):
+ self._rpm = is_program('rpm')
+ self._zyp = is_program('zypper')
+ if self._rpm is None or self._zyp is None:
+ raise OSError("Missing tools: %s, %s" % (self._rpm, self._zyp))
+
+ def get_version(self, name):
+ cmd = [self._rpm, '-q', name]
+ rc, stdout, stderr = run(cmd)
+ if rc == 0:
+            for line in stdout.splitlines():
+                line = line.decode('utf-8')
+                if name in line:
+                    return line.strip()
+ return None
+
+ def is_installed(self, name):
+ if not isinstance(self._rpm, str):
+ raise IOError(str(self._rpm))
+ if not isinstance(name, str):
+ raise IOError(str(name))
+ cmd = [self._rpm, '--query', '--info', name]
+ rc, stdout, stderr = run(cmd)
+ return rc == 0
+
+ def present(self, name):
+ if self.is_installed(name):
+ return (0, b'', b'', False)
+
+ if DRY_RUN:
+ return (0, b'', b'', True)
+
+ cmd = [self._zyp,
+ '--non-interactive',
+ '--no-refresh',
+ 'install',
+ '--auto-agree-with-licenses',
+ name]
+ rc, stdout, stderr = run(cmd)
+ changed = rc == 0
+ return (rc, stdout, stderr, changed)
+
+ def latest(self, name):
+ if not self.is_installed(name):
+ return self.present(name)
+
+ if DRY_RUN:
+ return (0, b'', b'', True)
+
+ pre_version = self.get_version(name)
+ cmd = [self._zyp,
+ '--non-interactive',
+ '--no-refresh',
+ 'update',
+ '--auto-agree-with-licenses',
+ name]
+ rc, stdout, stderr = run(cmd)
+ post_version = self.get_version(name)
+ changed = pre_version != post_version
+ return (rc, stdout, stderr, changed)
+
+ def absent(self, name):
+ if not self.is_installed(name):
+ return (0, b'', b'', False)
+
+ if DRY_RUN:
+ return (0, b'', b'', True)
+
+ cmd = [self._zyp,
+ '--non-interactive',
+ 'remove',
+ name]
+ rc, stdout, stderr = run(cmd)
+ changed = rc == 0
+ return (rc, stdout, stderr, changed)
+
+
+class Yum(PackageManager):
+    def __init__(self):
+        self._rpm = is_program('rpm')
+        self._yum = is_program('yum')
+        if self._rpm is None or self._yum is None:
+            raise OSError("Missing tools: %s, %s" % (self._rpm, self._yum))
+
+    def get_version(self, name):
+        cmd = [self._rpm, '-q', name]
+        rc, stdout, stderr = run(cmd)
+        if rc == 0:
+            # stdout is bytes: decode before comparing against the str name
+            for line in stdout.decode('utf-8').splitlines():
+                if name in line:
+                    return line.strip()
+        return None
+
+ def is_installed(self, name):
+ cmd = [self._rpm, '--query', '--info', name]
+ rc, stdout, stderr = run(cmd)
+ return rc == 0
+
+ def present(self, name):
+ if self.is_installed(name):
+ return (0, b'', b'', False)
+
+ if DRY_RUN:
+ return (0, b'', b'', True)
+
+ cmd = [self._yum,
+ '--assumeyes',
+ '-d', '2',
+ 'install',
+ name]
+ rc, stdout, stderr = run(cmd)
+ changed = rc == 0
+ return (rc, stdout, stderr, changed)
+
+ def latest(self, name):
+ if not self.is_installed(name):
+ return self.present(name)
+
+ if DRY_RUN:
+ return (0, b'', b'', True)
+
+ pre_version = self.get_version(name)
+ cmd = [self._yum,
+ '--assumeyes',
+ '-d', '2',
+ 'update',
+ name]
+ rc, stdout, stderr = run(cmd)
+ post_version = self.get_version(name)
+ changed = pre_version != post_version
+ return (rc, stdout, stderr, changed)
+
+ def absent(self, name):
+ if not self.is_installed(name):
+ return (0, b'', b'', False)
+
+ if DRY_RUN:
+ return (0, b'', b'', True)
+
+ cmd = [self._yum,
+ '--assumeyes',
+ '-d', '2',
+ 'erase',
+ name]
+ rc, stdout, stderr = run(cmd)
+ changed = rc == 0
+ return (rc, stdout, stderr, changed)
+
+
+class Apt(PackageManager):
+ def __init__(self):
+ self._apt = is_program('apt-get')
+ self._dpkg = is_program('dpkg')
+ if self._apt is None or self._dpkg is None:
+ raise OSError("Missing tools: %s, %s" % (self._apt, self._dpkg))
+
+ def get_version(self, name):
+ cmd = [self._dpkg, '--status', name]
+ rc, stdout, stderr = run(cmd)
+ if rc == 0:
+ for line in stdout.splitlines():
+ info = line.decode('utf-8').split(':', 1)
+ if len(info) == 2 and info[0] == 'Version':
+ return info[1].strip()
+ return None
+
+ def is_installed(self, name):
+ cmd = [self._dpkg, '--status', name]
+ rc, stdout, stderr = run(cmd)
+ if rc == 0:
+ for line in stdout.splitlines():
+ info = line.decode('utf-8').split(':', 1)
+ if len(info) == 2 and info[0] == 'Status':
+ return info[1].strip().endswith('installed')
+ return False
+
+ def present(self, name):
+ if self.is_installed(name):
+ return (0, b'', b'', False)
+
+ if DRY_RUN:
+ return (0, b'', b'', True)
+
+ cmd = [self._apt,
+ '--assume-yes',
+ '--quiet',
+ 'install',
+ name]
+ rc, stdout, stderr = run(cmd)
+ changed = rc == 0
+ return (rc, stdout, stderr, changed)
+
+ def latest(self, name):
+ if not self.is_installed(name):
+ return self.present(name)
+
+ if DRY_RUN:
+ return (0, b'', b'', True)
+
+ pre_version = self.get_version(name)
+ cmd = [self._apt,
+ '--assume-yes',
+ '--quiet',
+ '--only-upgrade',
+ 'install',
+ name]
+ rc, stdout, stderr = run(cmd)
+ post_version = self.get_version(name)
+ changed = pre_version != post_version
+ return (rc, stdout, stderr, changed)
+
+ def absent(self, name):
+ if not self.is_installed(name):
+ return (0, b'', b'', False)
+
+ if DRY_RUN:
+ return (0, b'', b'', True)
+
+ cmd = [self._apt,
+ '--assume-yes',
+ '--quiet',
+ 'purge',
+ name]
+ rc, stdout, stderr = run(cmd)
+ changed = rc == 0
+ return (rc, stdout, stderr, changed)
+
+
+class Pacman(PackageManager):
+ pass
+
+
+def manage_package(pkg, state):
+    """
+    Brings the given package to the desired state using the first
+    supported package manager found on the system.
+    """
+    if pkg is None:
+        raise ValueError("No package name given")
+    pf = get_platform()
+    if pf != 'Linux':
+        fail(msg="Unsupported platform: " + pf)
+ managers = {
+ 'zypper': Zypper,
+ 'yum': Yum,
+ 'apt-get': Apt,
+ #'pacman': Pacman
+ }
+ for name, mgr in managers.items():
+ exe = is_program(name)
+ if exe:
+ rc, stdout, stderr, changed = mgr().dispatch(pkg, state)
+ return {'rc': rc,
+ 'stdout': stdout.decode('utf-8'),
+ 'stderr': stderr.decode('utf-8'),
+ 'changed': changed
+ }
+ fail(msg="No supported package manager found")
+
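+# On success, manage_package() returns a dict that main() prints as JSON,
+# e.g. (assumed output): {"rc": 0, "stdout": "", "stderr": "", "changed": true}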
+
+def main():
+ import argparse
+ parser = argparse.ArgumentParser(
+ description="(Semi)-Universal package installer",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+ parser.add_argument('-d', '--dry-run', dest='dry_run', action='store_true',
+ help="Only check if changes would be made")
+
+ parser.add_argument('-n', '--name', metavar='name', type=str,
+ help="Name of package")
+
+ parser.add_argument('-s', '--state', metavar='state', type=str,
+ help="Desired state (present|latest|removed)", default="present")
+
+    args = parser.parse_args()
+    global DRY_RUN
+    DRY_RUN = args.dry_run
+    if not args.name or not args.state:
+        parser.error("Both a package name and a desired state are required")
+    data = manage_package(args.name, args.state)
+    # dump the dict itself: wrapping it in str() would emit a JSON string,
+    # which consumers such as crm_script.package() cannot use as a dict
+    print(json.dumps(data))
+
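+# Example invocation (package name is hypothetical):
+#   ./crm_pkg.py --name pacemaker --state latest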
+if __name__ == '__main__':
+    main()
diff --git a/utils/crm_rpmcheck.py b/utils/crm_rpmcheck.py
new file mode 100755
index 0000000..4901106
--- /dev/null
+++ b/utils/crm_rpmcheck.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python3
+# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
+# See COPYING for license information.
+
+import os
+import sys
+import json
+import subprocess
+
+
+def run(cmd):
+    proc = subprocess.Popen(cmd,
+                            shell=False,
+                            stdin=None,
+                            stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+    # communicate() already waits for the process, so no extra wait() is needed
+    out, err = proc.communicate(None)
+    return proc.returncode, out.decode('utf-8'), err.decode('utf-8')
+
+
+def package_data(pkg):
+ """
+ Gathers version and release information about a package.
+ """
+ if os.path.isfile('/bin/rpm'):
+ return rpm_package_data(pkg)
+
+ if os.path.isfile('/usr/bin/dpkg'):
+ return dpkg_package_data(pkg)
+
+ return {'name': pkg, 'error': "unknown package manager"}
+
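+# For an installed RPM this yields something like (values are illustrative):
+#   {'name': 'pacemaker', 'version': '2.1.7', 'release': '1.1'}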
+
+def rpm_package_data(pkg):
+ """
+ Gathers version and release information about an RPM package.
+ """
+ _qfmt = 'version: %{VERSION}\nrelease: %{RELEASE}\n'
+ rc, out, err = run(['/bin/rpm', '-q', '--queryformat=' + _qfmt, pkg])
+ if rc == 0:
+ data = {'name': pkg}
+ for line in out.split('\n'):
+ info = line.split(':', 1)
+ if len(info) == 2:
+ data[info[0].strip()] = info[1].strip()
+ return data
+ else:
+ return {'name': pkg, 'error': "package not installed"}
+
+
+def dpkg_package_data(pkg):
+ """
+ Gathers version and release information about a DPKG package.
+ """
+ rc, out, err = run(['/usr/bin/dpkg', '--status', pkg])
+ if rc == 0:
+ data = {'name': pkg}
+ for line in out.split('\n'):
+ info = line.split(':', 1)
+ if len(info) == 2:
+ data[info[0].strip().lower()] = info[1].strip()
+ return data
+ else:
+ return {'name': pkg, 'error': "package not installed"}
+
+
+def main():
+ data = [package_data(pkg) for pkg in sys.argv[1:]]
+ print(json.dumps(data))
+
+if __name__ == '__main__':
+    main()
diff --git a/utils/crm_script.py b/utils/crm_script.py
new file mode 100644
index 0000000..2341539
--- /dev/null
+++ b/utils/crm_script.py
@@ -0,0 +1,190 @@
+import os
+import sys
+import getpass
+import select
+import subprocess as proc
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+_input = None
+
+# read stdin, if there's anything to read
+_stdin_data = {}
+while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
+ line = sys.stdin.readline()
+ if line:
+ d = line.split(':', 1)
+ if len(d) == 2:
+ _stdin_data[d[0].strip()] = d[1].strip()
+ else:
+ break
+
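+# The loop above reads a simple "key: value" header from stdin; a caller that
+# wants sudo_call() below to authenticate could write (illustrative):
+#   sudo: <password>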
+
+def decode_utf8(s):
+ """
+ Convert the given byte sequence to a
+ utf8 string.
+ """
+ if s is None or isinstance(s, str):
+ return s
+ return s.decode('utf-8', 'ignore')
+
+
+def host():
+ return os.uname()[1]
+
+
+def get_input():
+    global _input
+    if _input is None:
+        with open('./script.input') as f:
+            _input = json.load(f)
+    return _input
+
+
+def parameters():
+ return get_input()[0]
+
+
+def param(name):
+ return parameters().get(name)
+
+
+def output(step_idx):
+ if step_idx < len(get_input()):
+ return get_input()[step_idx]
+ return {}
+
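+# ./script.input is a JSON list: element 0 carries the script parameters and
+# element N the output of step N, e.g. (illustrative):
+#   [{"debug": "true", "sudo": "yes"}, {"rc": 0}]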
+
+def exit_fail(msg):
+ print(msg, file=sys.stderr)
+ sys.exit(1)
+
+
+def exit_ok(data):
+ print(json.dumps(data))
+ sys.exit(0)
+
+
+def is_true(s):
+ if s in (True, False):
+ return s
+ return isinstance(s, str) and s.lower() in ('yes', 'true', '1', 'on')
+
+
+_debug_enabled = None
+
+
+def debug_enabled():
+ global _debug_enabled
+ if _debug_enabled is None:
+ _debug_enabled = is_true(param('debug'))
+ return _debug_enabled
+
+
+def info(msg):
+    "writes msg to the log file"
+    with open('./crm_script.debug', 'a') as dbglog:
+        dbglog.write('%s\n' % (msg))
+
+
+def debug(msg):
+    "writes msg to log and syslog if debug is enabled"
+    if debug_enabled():
+        try:
+            with open('./crm_script.debug', 'a') as dbglog:
+                dbglog.write('%s\n' % (msg))
+            import syslog
+            syslog.openlog("crmsh", 0, syslog.LOG_USER)
+            # syslog.syslog() takes a str in Python 3, not bytes
+            syslog.syslog(syslog.LOG_NOTICE, str(msg))
+        except Exception:
+            pass
+
+
+def call(cmd, shell=False):
+ debug("crm_script(call): %s" % (cmd))
+ p = proc.Popen(cmd, shell=shell, stdin=None, stdout=proc.PIPE, stderr=proc.PIPE)
+ out, err = p.communicate()
+ return p.returncode, decode_utf8(out).strip(), decode_utf8(err).strip()
+
+
+def use_sudo():
+ return getpass.getuser() != 'root' and is_true(param('sudo')) and _stdin_data.get('sudo')
+
+
+def sudo_call(cmd, shell=False):
+    if not use_sudo():
+        return call(cmd, shell=shell)
+    debug("crm_script(sudo_call): %s" % (cmd))
+    # os.unsetenv() would not update os.environ, so pop the key instead
+    os.environ.pop('SSH_ASKPASS', None)
+    call(['sudo', '-k'], shell=False)
+    sudo_prompt = 'crm_script_sudo_prompt'
+    if isinstance(cmd, str):
+        cmd = "sudo -H -S -p '%s' %s" % (sudo_prompt, cmd)
+    else:
+        cmd = ['sudo', '-H', '-S', '-p', sudo_prompt] + cmd
+    p = proc.Popen(cmd, shell=shell, stdin=proc.PIPE, stdout=proc.PIPE, stderr=proc.PIPE)
+    sudo_pass = "%s\n" % (_stdin_data.get('sudo', 'linux'))
+    debug("CMD(SUDO): %s" % (str(cmd)))
+    # the pipes carry bytes, so encode the password before writing it
+    out, err = p.communicate(input=sudo_pass.encode('utf-8'))
+    return p.returncode, decode_utf8(out).strip(), decode_utf8(err).strip()
+
+
+def service(name, action):
+ if action.startswith('is-'):
+ return call(['/usr/bin/systemctl', action, name + '.service'])
+ return sudo_call(['/usr/bin/systemctl', action, name + '.service'])
+
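+# Sketch of typical use: query state without sudo, change state with it:
+#   if service('corosync', 'is-active')[0] != 0:
+#       service('corosync', 'start')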
+
+def package(name, state):
+ rc, out, err = sudo_call(['./crm_pkg.py', '-n', name, '-s', state])
+ if rc != 0:
+ raise IOError("%s / %s" % (out, err))
+ outp = json.loads(decode_utf8(out))
+ if isinstance(outp, dict) and 'rc' in outp:
+ rc = int(outp['rc'])
+ if rc != 0:
+ raise IOError("(rc=%s) %s%s" % (rc, outp.get('stdout', ''), outp.get('stderr', '')))
+ return outp
+
+
+def check_package(name, state):
+ rc, out, err = call(['./crm_pkg.py', '--dry-run', '-n', name, '-s', state])
+ if rc != 0:
+ raise IOError(err)
+ outp = json.loads(out)
+ if isinstance(outp, dict) and 'rc' in outp:
+ rc = int(outp['rc'])
+ if rc != 0:
+ raise IOError("(rc=%s) %s%s" % (rc, outp.get('stdout', ''), outp.get('stderr', '')))
+ return outp
+
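+# check_package() runs crm_pkg.py with --dry-run, so a guarded install is
+# (package name illustrative):
+#   if check_package('pacemaker', 'present').get('changed'):
+#       package('pacemaker', 'present')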
+
+def rpmcheck(names):
+ rc, out, err = call(['./crm_rpmcheck.py'] + names)
+ if rc != 0:
+ raise IOError(err)
+ return json.loads(out)
+
+
+def save_template(template, dest, **kwargs):
+ '''
+ 1. Reads a template from <template>,
+ 2. Replaces all template variables with those in <kwargs> and
+ 3. writes the resulting file to <dest>
+ '''
+    import re
+    with open(template) as f:
+        tmpl = f.read()
+ keys = re.findall(r'%\((\w+)\)s', tmpl, re.MULTILINE)
+ missing_keys = set(keys) - set(kwargs.keys())
+ if missing_keys:
+ raise ValueError("Missing template arguments: %s" % ', '.join(missing_keys))
+ tmpl = tmpl % kwargs
+ try:
+ with open(dest, 'w') as f:
+ f.write(tmpl)
+ except Exception as e:
+ raise IOError("Failed to write %s from template %s: %s" % (dest, template, e))
+ debug("crm_script(save_template): wrote %s" % (dest))
+
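+# Example (paths and values are illustrative):
+#   save_template('./sysconfig.template', '/etc/sysconfig/sbd',
+#                 SBD_DEVICE='/dev/example', SBD_WATCHDOG='yes')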
diff --git a/version.in b/version.in
new file mode 100644
index 0000000..a24f987
--- /dev/null
+++ b/version.in
@@ -0,0 +1 @@
+@PACKAGE_VERSION@